2024-11-14 09:53:16,150 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 09:53:16,168 main DEBUG Took 0.015536 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-14 09:53:16,169 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-14 09:53:16,169 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-14 09:53:16,171 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-14 09:53:16,173 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,183 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-14 09:53:16,200 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,202 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,203 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,204 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,204 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,206 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,206 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,207 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,207 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,208 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,209 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,209 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,210 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-14 09:53:16,211 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,211 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,212 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,212 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,213 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,214 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,215 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,215 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,216 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,216 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-14 09:53:16,217 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,218 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-14 09:53:16,220 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-14 09:53:16,222 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-14 09:53:16,224 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-14 09:53:16,225 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-14 09:53:16,227 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-14 09:53:16,228 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-14 09:53:16,238 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-14 09:53:16,241 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-14 09:53:16,243 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-14 09:53:16,243 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-14 09:53:16,244 main DEBUG createAppenders(={Console}) 2024-11-14 09:53:16,244 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-14 09:53:16,245 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-14 09:53:16,245 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-14 09:53:16,246 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-14 09:53:16,246 main DEBUG OutputStream closed 2024-11-14 09:53:16,246 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-14 09:53:16,247 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-14 09:53:16,247 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-14 09:53:16,318 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-14 09:53:16,320 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-14 09:53:16,321 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-14 09:53:16,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-14 09:53:16,322 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-14 09:53:16,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-14 09:53:16,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-14 09:53:16,323 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-14 09:53:16,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-14 09:53:16,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-14 09:53:16,324 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-14 09:53:16,325 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-14 09:53:16,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-14 09:53:16,325 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-14 09:53:16,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-14 09:53:16,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-14 09:53:16,326 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-14 09:53:16,327 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-14 09:53:16,329 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-14 09:53:16,330 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-14 09:53:16,330 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-14 09:53:16,331 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-14T09:53:16,581 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490 2024-11-14 09:53:16,585 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-14 09:53:16,585 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-14T09:53:16,593 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-14T09:53:16,633 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=243, ProcessCount=11, AvailableMemoryMB=5047 2024-11-14T09:53:16,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:53:16,654 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6, deleteOnExit=true 2024-11-14T09:53:16,654 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:53:16,655 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/test.cache.data in system properties and HBase conf 2024-11-14T09:53:16,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:53:16,656 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:53:16,657 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:53:16,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:53:16,658 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:53:16,757 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-14T09:53:16,892 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:53:16,897 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:53:16,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:53:16,898 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:53:16,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:53:16,899 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:53:16,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:53:16,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:53:16,900 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:53:16,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:53:16,901 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:53:16,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:53:16,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:53:16,902 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:53:16,903 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:53:17,355 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:53:18,052 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-14T09:53:18,131 INFO [Time-limited test {}] log.Log(170): Logging initialized @2765ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-14T09:53:18,192 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:53:18,243 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:53:18,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:53:18,261 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:53:18,262 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:53:18,272 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:53:18,275 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:53:18,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:53:18,447 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/java.io.tmpdir/jetty-localhost-43027-hadoop-hdfs-3_4_1-tests_jar-_-any-12468671932496979503/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:53:18,454 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:43027} 2024-11-14T09:53:18,454 INFO [Time-limited test {}] server.Server(415): Started @3089ms 2024-11-14T09:53:18,484 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:53:19,042 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:53:19,049 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:53:19,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:53:19,050 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:53:19,051 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:53:19,051 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:53:19,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:53:19,150 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/java.io.tmpdir/jetty-localhost-37117-hadoop-hdfs-3_4_1-tests_jar-_-any-5856707114362653049/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:53:19,151 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:37117} 2024-11-14T09:53:19,151 INFO [Time-limited test {}] server.Server(415): Started @3786ms 2024-11-14T09:53:19,199 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:53:19,306 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:53:19,313 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:53:19,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:53:19,315 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:53:19,316 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:53:19,317 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:53:19,318 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:53:19,441 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/java.io.tmpdir/jetty-localhost-41075-hadoop-hdfs-3_4_1-tests_jar-_-any-11607122376208263584/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:53:19,442 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:41075} 2024-11-14T09:53:19,442 INFO [Time-limited test {}] server.Server(415): Started @4077ms 2024-11-14T09:53:19,445 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-14T09:53:20,731 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data3/current/BP-1836200132-172.17.0.2-1731577997431/current, will proceed with Du for space computation calculation, 2024-11-14T09:53:20,731 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data1/current/BP-1836200132-172.17.0.2-1731577997431/current, will proceed with Du for space computation calculation, 2024-11-14T09:53:20,731 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data4/current/BP-1836200132-172.17.0.2-1731577997431/current, will proceed with Du for space computation calculation, 2024-11-14T09:53:20,731 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data2/current/BP-1836200132-172.17.0.2-1731577997431/current, will proceed with Du for space computation calculation, 2024-11-14T09:53:20,760 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:53:20,760 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:53:20,805 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ab3a319a6bbe1d0 with lease ID 0xa8d34766bdbba01c: Processing first storage report for DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd from datanode DatanodeRegistration(127.0.0.1:39915, datanodeUuid=2c6065cb-6358-4129-b2a1-56867e111389, infoPort=43623, infoSecurePort=0, ipcPort=38837, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431) 2024-11-14T09:53:20,807 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ab3a319a6bbe1d0 with lease ID 0xa8d34766bdbba01c: from storage DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd node DatanodeRegistration(127.0.0.1:39915, datanodeUuid=2c6065cb-6358-4129-b2a1-56867e111389, infoPort=43623, infoSecurePort=0, ipcPort=38837, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:53:20,807 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe0269336d72940f3 with lease ID 0xa8d34766bdbba01b: Processing first storage report for DS-c59de04a-ff5d-4454-8720-f15909fc82a3 from datanode DatanodeRegistration(127.0.0.1:42831, datanodeUuid=61e601ca-99d9-402a-b885-0f1442433071, infoPort=41433, infoSecurePort=0, ipcPort=34307, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431) 2024-11-14T09:53:20,808 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe0269336d72940f3 with lease ID 0xa8d34766bdbba01b: from storage DS-c59de04a-ff5d-4454-8720-f15909fc82a3 node DatanodeRegistration(127.0.0.1:42831, datanodeUuid=61e601ca-99d9-402a-b885-0f1442433071, infoPort=41433, infoSecurePort=0, ipcPort=34307, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:53:20,808 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6ab3a319a6bbe1d0 with lease ID 0xa8d34766bdbba01c: Processing first storage report for DS-380cd07b-f6f5-47dc-9b26-7ea05d043e7d from datanode DatanodeRegistration(127.0.0.1:39915, datanodeUuid=2c6065cb-6358-4129-b2a1-56867e111389, infoPort=43623, infoSecurePort=0, ipcPort=38837, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431) 2024-11-14T09:53:20,808 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6ab3a319a6bbe1d0 with lease ID 0xa8d34766bdbba01c: from storage DS-380cd07b-f6f5-47dc-9b26-7ea05d043e7d node DatanodeRegistration(127.0.0.1:39915, datanodeUuid=2c6065cb-6358-4129-b2a1-56867e111389, infoPort=43623, infoSecurePort=0, ipcPort=38837, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:53:20,809 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe0269336d72940f3 with lease ID 0xa8d34766bdbba01b: Processing first storage report for DS-5370494a-8aff-4cfd-9902-ae58bddf294d from datanode DatanodeRegistration(127.0.0.1:42831, datanodeUuid=61e601ca-99d9-402a-b885-0f1442433071, infoPort=41433, infoSecurePort=0, ipcPort=34307, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431) 2024-11-14T09:53:20,809 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0xe0269336d72940f3 with lease ID 0xa8d34766bdbba01b: from storage DS-5370494a-8aff-4cfd-9902-ae58bddf294d node DatanodeRegistration(127.0.0.1:42831, datanodeUuid=61e601ca-99d9-402a-b885-0f1442433071, infoPort=41433, infoSecurePort=0, ipcPort=34307, storageInfo=lv=-57;cid=testClusterID;nsid=1295014209;c=1731577997431), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:53:20,849 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490 2024-11-14T09:53:20,920 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/zookeeper_0, clientPort=58969, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:53:20,930 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=58969 2024-11-14T09:53:20,939 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:20,942 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:21,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:53:21,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:53:21,541 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea with version=8 2024-11-14T09:53:21,542 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:53:21,617 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-14T09:53:21,857 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:53:21,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:21,868 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:21,873 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:53:21,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:21,874 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:53:22,009 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:53:22,062 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-14T09:53:22,071 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-14T09:53:22,075 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:53:22,097 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 64713 (auto-detected) 2024-11-14T09:53:22,098 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-14T09:53:22,116 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44945 2024-11-14T09:53:22,136 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44945 connecting to ZooKeeper ensemble=127.0.0.1:58969 2024-11-14T09:53:22,271 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449450x0, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:53:22,274 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44945-0x10138c371ae0000 connected 2024-11-14T09:53:22,369 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:22,371 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:22,382 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:53:22,385 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea, hbase.cluster.distributed=false 2024-11-14T09:53:22,406 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:53:22,410 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44945 
2024-11-14T09:53:22,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44945 2024-11-14T09:53:22,411 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44945 2024-11-14T09:53:22,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44945 2024-11-14T09:53:22,412 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44945 2024-11-14T09:53:22,505 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:53:22,506 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:22,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:22,507 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:53:22,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:53:22,507 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:53:22,510 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:53:22,513 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:53:22,514 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41071 2024-11-14T09:53:22,516 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41071 connecting to ZooKeeper ensemble=127.0.0.1:58969 2024-11-14T09:53:22,518 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:22,522 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:22,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:410710x0, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:53:22,537 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:53:22,537 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:41071-0x10138c371ae0001 connected 2024-11-14T09:53:22,540 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:53:22,547 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:53:22,550 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:53:22,554 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:53:22,555 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41071 2024-11-14T09:53:22,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41071 2024-11-14T09:53:22,556 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41071 2024-11-14T09:53:22,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41071 2024-11-14T09:53:22,557 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41071 2024-11-14T09:53:22,573 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:44945 2024-11-14T09:53:22,574 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,44945,1731578001707 2024-11-14T09:53:22,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:53:22,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:53:22,591 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,44945,1731578001707 2024-11-14T09:53:22,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:53:22,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:22,620 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:22,621 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:53:22,622 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,44945,1731578001707 from backup master directory 2024-11-14T09:53:22,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:53:22,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,44945,1731578001707 2024-11-14T09:53:22,631 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:53:22,631 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:53:22,631 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,44945,1731578001707 2024-11-14T09:53:22,634 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-14T09:53:22,635 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-14T09:53:22,682 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase.id] with ID: 9b446494-00cf-4b8b-8a0c-43fa63b38177 2024-11-14T09:53:22,682 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/.tmp/hbase.id 2024-11-14T09:53:22,692 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:53:22,693 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:53:22,694 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/.tmp/hbase.id]:[hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase.id] 2024-11-14T09:53:22,733 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:22,739 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-14T09:53:22,757 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 16ms. 2024-11-14T09:53:22,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:22,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:22,784 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:53:22,785 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:53:22,802 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:53:22,804 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:53:22,810 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:53:22,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:53:22,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:53:22,856 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store 2024-11-14T09:53:22,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:53:22,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:53:22,879 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-14T09:53:22,882 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:22,883 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:53:22,883 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:53:22,884 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:53:22,885 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:53:22,886 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:53:22,886 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:53:22,887 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578002883Disabling compacts and flushes for region at 1731578002883Disabling writes for close at 1731578002885 (+2 ms)Writing region close event to WAL at 1731578002886 (+1 ms)Closed at 1731578002886 2024-11-14T09:53:22,889 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/.initializing 2024-11-14T09:53:22,889 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/WALs/defc576eb6b7,44945,1731578001707 2024-11-14T09:53:22,910 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C44945%2C1731578001707, suffix=, logDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/WALs/defc576eb6b7,44945,1731578001707, archiveDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/oldWALs, maxLogs=10 2024-11-14T09:53:22,921 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C44945%2C1731578001707.1731578002916 2024-11-14T09:53:22,940 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/WALs/defc576eb6b7,44945,1731578001707/defc576eb6b7%2C44945%2C1731578001707.1731578002916 2024-11-14T09:53:22,947 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41433:41433),(127.0.0.1/127.0.0.1:43623:43623)] 2024-11-14T09:53:22,948 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:53:22,948 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:22,952 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:22,953 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:22,987 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,011 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:53:23,014 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:23,017 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,021 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:53:23,021 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:53:23,023 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,026 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:53:23,027 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:53:23,028 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,032 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:53:23,032 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,034 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:53:23,034 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,038 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,039 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,046 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,046 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,050 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:53:23,054 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:53:23,058 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:53:23,060 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783494, jitterRate=-0.0037365704774856567}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:53:23,067 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578002964Initializing all the Stores at 1731578002967 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578002967Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578002968 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578002968Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578002968Cleaning up temporary data from old regions at 1731578003047 (+79 ms)Region opened successfully at 1731578003066 (+19 ms) 2024-11-14T09:53:23,068 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:53:23,100 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2de223ce, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:53:23,127 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:53:23,136 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:53:23,136 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:53:23,139 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:53:23,140 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-14T09:53:23,146 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-14T09:53:23,146 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:53:23,173 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:53:23,181 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:53:23,230 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:53:23,234 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:53:23,237 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:53:23,250 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:53:23,254 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:53:23,260 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:53:23,272 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:53:23,274 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-14T09:53:23,283 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:53:23,303 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:53:23,314 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:53:23,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:53:23,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:53:23,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,329 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,44945,1731578001707, sessionid=0x10138c371ae0000, setting cluster-up flag (Was=false) 2024-11-14T09:53:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,389 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:53:23,393 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,44945,1731578001707 2024-11-14T09:53:23,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,420 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:23,451 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:53:23,455 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,44945,1731578001707 2024-11-14T09:53:23,464 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:53:23,529 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:53:23,538 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:53:23,544 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:53:23,549 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,44945,1731578001707 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:53:23,555 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:53:23,555 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:53:23,556 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,557 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578033557 2024-11-14T09:53:23,559 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:53:23,560 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:53:23,561 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:53:23,561 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(746): ClusterId : 9b446494-00cf-4b8b-8a0c-43fa63b38177 2024-11-14T09:53:23,562 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:53:23,563 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:53:23,564 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:53:23,564 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:53:23,564 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:53:23,564 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:53:23,565 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
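Among the records above, the FlushLargeStoresPolicy line is worth unpacking: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family flush lower bound falls back to the region's memstore flush size divided by the number of column families, which for master:store is 134217728 / 4 = 33554432 bytes, the "32.0 M" and the flushSizeLowerBound=33554432 printed in the log. A minimal plain-Java sketch of that arithmetic; the method name is invented for illustration.

```java
public class FlushLowerBoundSketch {
  /** Fallback used when no explicit per-family lower bound is configured. */
  static long perFamilyLowerBound(long memstoreFlushSize, int numFamilies) {
    return memstoreFlushSize / numFamilies;
  }

  public static void main(String[] args) {
    long flushSize = 134_217_728L; // flushSize printed for master:store
    int families = 4;              // info, proc, rs, state
    long lowerBound = perFamilyLowerBound(flushSize, families);
    System.out.println(lowerBound); // 33554432, i.e. the 32.0 M in the log
  }
}
```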
2024-11-14T09:53:23,568 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,568 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:53:23,568 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:53:23,569 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:53:23,570 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:53:23,572 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:53:23,572 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:53:23,574 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578003573,5,FailOnTimeoutGroup] 2024-11-14T09:53:23,574 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578003574,5,FailOnTimeoutGroup] 2024-11-14T09:53:23,575 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,575 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:53:23,576 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,576 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,580 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:53:23,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:53:23,581 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:53:23,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:53:23,582 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:53:23,583 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea 2024-11-14T09:53:23,590 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:53:23,590 DEBUG [RS:0;defc576eb6b7:41071 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@631c9b41, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:53:23,598 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:53:23,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:53:23,601 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:23,603 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:53:23,606 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:41071 2024-11-14T09:53:23,607 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:53:23,607 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:23,608 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:53:23,610 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:53:23,610 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:53:23,610 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(832): About to register with Master. 
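The hbase:meta descriptor created above carries a coprocessor attribute of the form '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', i.e. the pipe-separated <jar path>|<class>|<priority>|<arguments> spec that HBase uses for table-attribute coprocessors, with an empty path and empty arguments because the endpoint is already on the server classpath. A small plain-Java sketch of splitting such a spec; the class and variable names are illustrative only.

```java
public class CoprocessorSpecSketch {
  public static void main(String[] args) {
    // Spec copied from the hbase:meta descriptor in the log above:
    // <jar path>|<class name>|<priority>|<key=value arguments>
    String spec = "|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|";

    // Keep trailing empty fields (limit -1) so the empty argument slot survives.
    String[] parts = spec.split("\\|", -1);
    String jarPath   = parts[0];                   // "" -> load from the server classpath
    String className = parts[1];                   // MultiRowMutationEndpoint
    int    priority  = Integer.parseInt(parts[2]); // 536870911, the priority printed in the log
    String arguments = parts.length > 3 ? parts[3] : "";

    System.out.printf("path=%s class=%s priority=%d args=%s%n",
        jarPath, className, priority, arguments);
  }
}
```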
2024-11-14T09:53:23,612 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:53:23,612 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:23,613 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,44945,1731578001707 with port=41071, startcode=1731578002473 2024-11-14T09:53:23,613 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:53:23,616 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:53:23,617 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:23,618 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:53:23,621 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 
1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:53:23,621 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:23,622 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:23,623 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:53:23,624 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740 2024-11-14T09:53:23,625 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740 2024-11-14T09:53:23,627 DEBUG [RS:0;defc576eb6b7:41071 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:53:23,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:53:23,629 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:53:23,630 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:53:23,632 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:53:23,637 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:53:23,638 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=842052, jitterRate=0.07072494924068451}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:53:23,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578003601Initializing all the Stores at 1731578003603 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578003603Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578003603Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578003603Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578003603Cleaning up temporary data from old regions at 1731578003629 (+26 ms)Region opened successfully at 1731578003642 (+13 ms) 2024-11-14T09:53:23,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:53:23,642 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:53:23,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:53:23,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:53:23,642 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:53:23,643 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:53:23,644 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578003642Disabling compacts and flushes for region at 1731578003642Disabling writes for close at 1731578003642Writing region 
close event to WAL at 1731578003643 (+1 ms)Closed at 1731578003643 2024-11-14T09:53:23,646 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:53:23,647 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:53:23,652 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:53:23,660 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:53:23,663 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:53:23,687 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39585, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:53:23,692 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44945 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,695 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44945 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,707 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea 2024-11-14T09:53:23,707 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38163 2024-11-14T09:53:23,708 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:53:23,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:53:23,715 DEBUG [RS:0;defc576eb6b7:41071 {}] zookeeper.ZKUtil(111): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,715 WARN [RS:0;defc576eb6b7:41071 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
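The "Opened 1588230740" record above resolves the split policy to desiredMaxFileSize=842052 with jitterRate=0.07072494924068451, while the earlier master:store open printed desiredMaxFileSize=783494 with jitterRate=-0.0037365704774856567. Both values are consistent with an effective size of base + (long)(base * jitterRate) around a configured base of roughly 786432 bytes; the base and the exact form of the formula are inferred here, not printed in the log. A quick arithmetic check in plain Java:

```java
public class SplitJitterSketch {
  /** Assumed form: effective = base + (long)(base * jitterRate), truncated toward zero. */
  static long effectiveMaxFileSize(long base, double jitterRate) {
    return base + (long) (base * jitterRate);
  }

  public static void main(String[] args) {
    long inferredBase = 786_432L; // not printed in the log; inferred from the two samples below

    // hbase:meta region (this section): jitterRate=0.07072494924068451 -> 842052
    System.out.println(effectiveMaxFileSize(inferredBase, 0.07072494924068451));

    // master:store region (earlier above): jitterRate=-0.0037365704774856567 -> 783494
    System.out.println(effectiveMaxFileSize(inferredBase, -0.0037365704774856567));
  }
}
```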
2024-11-14T09:53:23,716 INFO [RS:0;defc576eb6b7:41071 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:53:23,716 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,718 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,41071,1731578002473] 2024-11-14T09:53:23,740 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:53:23,754 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:53:23,758 INFO [RS:0;defc576eb6b7:41071 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:53:23,758 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,759 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:53:23,764 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:53:23,765 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:53:23,766 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:53:23,767 DEBUG [RS:0;defc576eb6b7:41071 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:53:23,768 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,768 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,769 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,769 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,769 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,769 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,41071,1731578002473-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:53:23,784 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:53:23,786 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,41071,1731578002473-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,786 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:23,787 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.Replication(171): defc576eb6b7,41071,1731578002473 started 2024-11-14T09:53:23,802 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
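The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M, i.e. the low mark is 95% of the global limit (880 * 0.95 = 836). That matches the usual 0.95 default for hbase.regionserver.global.memstore.size.lower.limit; treat the property name and default as a recollection rather than something the log states. The arithmetic, for the record:

```java
public class MemstoreLowMarkSketch {
  public static void main(String[] args) {
    long globalLimitMb = 880;         // globalMemStoreLimit printed above
    double lowerLimitFraction = 0.95; // assumed default for the lower-limit fraction
    long lowMarkMb = (long) (globalLimitMb * lowerLimitFraction);
    System.out.println(lowMarkMb);    // 836, matching globalMemStoreLimitLowMark in the log
  }
}
```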
2024-11-14T09:53:23,803 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,41071,1731578002473, RpcServer on defc576eb6b7/172.17.0.2:41071, sessionid=0x10138c371ae0001 2024-11-14T09:53:23,803 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:53:23,804 DEBUG [RS:0;defc576eb6b7:41071 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,804 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,41071,1731578002473' 2024-11-14T09:53:23,804 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:53:23,805 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:53:23,806 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:53:23,806 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:53:23,806 DEBUG [RS:0;defc576eb6b7:41071 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,41071,1731578002473 2024-11-14T09:53:23,806 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,41071,1731578002473' 2024-11-14T09:53:23,806 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:53:23,807 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:53:23,807 DEBUG [RS:0;defc576eb6b7:41071 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:53:23,807 INFO [RS:0;defc576eb6b7:41071 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:53:23,807 INFO [RS:0;defc576eb6b7:41071 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:53:23,813 WARN [defc576eb6b7:44945 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
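Two things in the records just above are configuration-driven. RPC and space quotas are reported as disabled, which is the default. And the AssignmentManager warns that it cannot place the one unassigned region (hbase:meta), presumably because the newly registered region server is not yet usable for assignment at that instant; the assignQueue run at 09:53:24 below then finds one server and moves hbase:meta to OPENING. If quota support were wanted in a test like this, it would be switched on before the mini-cluster starts; the property below is the standard quota switch, but treat the snippet as a hedged sketch rather than part of this test.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class QuotaConfigSketch {
  public static void main(String[] args) {
    // Quotas are off by default, which is why the log prints "Quota support disabled".
    Configuration conf = HBaseConfiguration.create();
    conf.setBoolean("hbase.quota.enabled", true); // enable the RPC/space quota managers
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}
```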
2024-11-14T09:53:23,917 INFO [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C41071%2C1731578002473, suffix=, logDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473, archiveDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs, maxLogs=32 2024-11-14T09:53:23,921 INFO [RS:0;defc576eb6b7:41071 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578003921 2024-11-14T09:53:23,929 INFO [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578003921 2024-11-14T09:53:23,931 DEBUG [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41433:41433),(127.0.0.1/127.0.0.1:43623:43623)] 2024-11-14T09:53:24,066 DEBUG [defc576eb6b7:44945 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:53:24,079 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:24,085 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,41071,1731578002473, state=OPENING 2024-11-14T09:53:24,093 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:53:24,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:24,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:53:24,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:53:24,105 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:53:24,107 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:53:24,110 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,41071,1731578002473}] 2024-11-14T09:53:24,286 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:53:24,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49465, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:53:24,299 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:53:24,300 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:53:24,303 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C41071%2C1731578002473.meta, suffix=.meta, logDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473, archiveDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs, maxLogs=32 2024-11-14T09:53:24,305 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.meta.1731578004305.meta 2024-11-14T09:53:24,312 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.meta.1731578004305.meta 2024-11-14T09:53:24,314 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:53:24,315 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:53:24,317 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:53:24,319 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:53:24,323 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
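For orientation, the entries above show the region server creating its FSHLog writers with blocksize=256 MB, rollsize=128 MB and maxLogs=32. A minimal, hypothetical sketch of how such values are usually reached through configuration; the property names (hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier, hbase.regionserver.maxlogs) are standard HBase keys assumed here, not quoted from this log:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalConfigSketch {
  public static Configuration walConfig() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size; the roll size is derived from it via the roll multiplier,
    // so 256 MB * 0.5 = 128 MB matches "blocksize=256 MB, rollsize=128 MB" above.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Cap on the number of live WAL files before a roll is forced ("maxLogs=32").
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}
```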
2024-11-14T09:53:24,327 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:53:24,328 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:24,328 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:53:24,328 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:53:24,331 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:53:24,332 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:53:24,333 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:24,333 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:24,334 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:53:24,335 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:53:24,335 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:24,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:24,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:53:24,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:53:24,338 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:24,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:53:24,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:53:24,340 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:53:24,340 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:24,341 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-14T09:53:24,341 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:53:24,343 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740 2024-11-14T09:53:24,345 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740 2024-11-14T09:53:24,347 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:53:24,347 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:53:24,348 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:53:24,350 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:53:24,352 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=877448, jitterRate=0.11573316156864166}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:53:24,352 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:53:24,353 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578004328Writing region info on filesystem at 1731578004329 (+1 ms)Initializing all the Stores at 1731578004330 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578004330Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578004331 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578004331Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578004331Cleaning up temporary data from old regions at 1731578004347 (+16 ms)Running coprocessor post-open hooks at 1731578004352 (+5 ms)Region opened successfully at 1731578004353 (+1 ms) 2024-11-14T09:53:24,359 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578004279 2024-11-14T09:53:24,368 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:53:24,368 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:53:24,370 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:24,372 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,41071,1731578002473, state=OPEN 2024-11-14T09:53:24,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:53:24,403 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:53:24,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:53:24,404 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:53:24,404 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:24,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:53:24,414 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,41071,1731578002473 in 296 msec 2024-11-14T09:53:24,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:53:24,420 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 764 msec 2024-11-14T09:53:24,422 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:53:24,422 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:53:24,441 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:53:24,442 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,41071,1731578002473, seqNum=-1] 2024-11-14T09:53:24,458 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:53:24,460 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42933, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:53:24,479 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 987 msec 2024-11-14T09:53:24,480 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578004480, completionTime=-1 2024-11-14T09:53:24,483 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:53:24,483 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:53:24,509 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:53:24,510 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578064510 2024-11-14T09:53:24,510 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578124510 2024-11-14T09:53:24,510 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 26 msec 2024-11-14T09:53:24,512 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:24,512 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:24,512 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:24,514 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:44945, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:53:24,514 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:24,515 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:53:24,520 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:53:24,545 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.912sec 2024-11-14T09:53:24,546 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:53:24,547 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:53:24,548 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:53:24,549 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:53:24,549 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:53:24,550 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:53:24,550 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:53:24,560 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:53:24,561 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:53:24,561 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44945,1731578001707-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:53:24,574 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@116576a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:53:24,576 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-14T09:53:24,577 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-14T09:53:24,580 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,44945,-1 for getting cluster id 2024-11-14T09:53:24,583 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:53:24,592 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '9b446494-00cf-4b8b-8a0c-43fa63b38177' 2024-11-14T09:53:24,595 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:53:24,595 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "9b446494-00cf-4b8b-8a0c-43fa63b38177" 2024-11-14T09:53:24,598 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@34d065df, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:53:24,598 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,44945,-1] 2024-11-14T09:53:24,601 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:53:24,604 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:53:24,605 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:53:24,608 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@151fab88, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:53:24,608 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:53:24,616 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,41071,1731578002473, seqNum=-1] 2024-11-14T09:53:24,616 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:53:24,619 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36452, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:53:24,637 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=defc576eb6b7,44945,1731578001707 2024-11-14T09:53:24,637 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:53:24,644 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:53:24,647 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:53:24,653 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is defc576eb6b7,44945,1731578001707 2024-11-14T09:53:24,655 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@2b7a956b 2024-11-14T09:53:24,656 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:53:24,658 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60234, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:53:24,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:53:24,660 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
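The two TableDescriptorChecker warnings above are expected in this test: the region max file size (786432) and memstore flush size (8192) are shrunk far below production defaults so that flushes and log rolls happen quickly. A minimal sketch of setting the same values programmatically; only the two property names quoted in the warnings are used:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class TinyRegionConfigSketch {
  public static Configuration tinyRegionConfig() {
    Configuration conf = HBaseConfiguration.create();
    // Deliberately tiny values, matching the two warnings above; they would be
    // unreasonable in production but force frequent splits and flushes in a test.
    conf.setLong("hbase.hregion.max.filesize", 786432L);      // MAX_FILESIZE
    conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // MEMSTORE_FLUSHSIZE
    return conf;
  }
}
```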
2024-11-14T09:53:24,664 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:53:24,671 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:53:24,674 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:53:24,676 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-14T09:53:24,676 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:24,679 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:53:24,701 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:53:24,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741835_1011 (size=389) 2024-11-14T09:53:24,737 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741835_1011 (size=389) 2024-11-14T09:53:24,740 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 9189b0e28d438732437bcbb2f4ebe44e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea 2024-11-14T09:53:24,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741836_1012 (size=72) 2024-11-14T09:53:24,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741836_1012 (size=72) 2024-11-14T09:53:24,750 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:24,751 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 9189b0e28d438732437bcbb2f4ebe44e, disabling compactions & flushes 2024-11-14T09:53:24,751 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:24,751 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:24,751 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. after waiting 0 ms 2024-11-14T09:53:24,751 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:24,751 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:24,751 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 9189b0e28d438732437bcbb2f4ebe44e: Waiting for close lock at 1731578004751Disabling compacts and flushes for region at 1731578004751Disabling writes for close at 1731578004751Writing region close event to WAL at 1731578004751Closed at 1731578004751 2024-11-14T09:53:24,753 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:53:24,758 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1731578004753"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578004753"}]},"ts":"1731578004753"} 2024-11-14T09:53:24,763 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
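For reference, a hedged sketch of how a client could create a table matching the descriptor logged above (a single 'info' family with VERSIONS => '1', BLOOMFILTER => 'ROW' and 64 KB blocks) through the public Admin API. The connection setup is an assumption for illustration and is not taken from this log:

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(1)                 // VERSIONS => '1'
        .setBloomFilterType(BloomType.ROW) // BLOOMFILTER => 'ROW'
        .setBlocksize(64 * 1024)           // BLOCKSIZE => '65536 B (64KB)'
        .build();
    TableDescriptor desc = TableDescriptorBuilder.newBuilder(name)
        .setColumnFamily(info)
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Drives the same CreateTableProcedure seen in the entries above.
      admin.createTable(desc);
    }
  }
}
```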
2024-11-14T09:53:24,765 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:53:24,768 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578004765"}]},"ts":"1731578004765"} 2024-11-14T09:53:24,772 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-14T09:53:24,774 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9189b0e28d438732437bcbb2f4ebe44e, ASSIGN}] 2024-11-14T09:53:24,776 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9189b0e28d438732437bcbb2f4ebe44e, ASSIGN 2024-11-14T09:53:24,778 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9189b0e28d438732437bcbb2f4ebe44e, ASSIGN; state=OFFLINE, location=defc576eb6b7,41071,1731578002473; forceNewPlan=false, retain=false 2024-11-14T09:53:24,930 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9189b0e28d438732437bcbb2f4ebe44e, regionState=OPENING, regionLocation=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:24,935 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9189b0e28d438732437bcbb2f4ebe44e, ASSIGN because future has completed 2024-11-14T09:53:24,937 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9189b0e28d438732437bcbb2f4ebe44e, server=defc576eb6b7,41071,1731578002473}] 2024-11-14T09:53:25,099 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 
2024-11-14T09:53:25,100 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 9189b0e28d438732437bcbb2f4ebe44e, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:53:25,100 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,101 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:53:25,101 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,101 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,104 INFO [StoreOpener-9189b0e28d438732437bcbb2f4ebe44e-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,107 INFO [StoreOpener-9189b0e28d438732437bcbb2f4ebe44e-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9189b0e28d438732437bcbb2f4ebe44e columnFamilyName info 2024-11-14T09:53:25,107 DEBUG [StoreOpener-9189b0e28d438732437bcbb2f4ebe44e-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:53:25,109 INFO [StoreOpener-9189b0e28d438732437bcbb2f4ebe44e-1 {}] regionserver.HStore(327): Store=9189b0e28d438732437bcbb2f4ebe44e/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:53:25,109 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,111 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,112 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,113 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,113 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,116 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,120 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:53:25,121 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 9189b0e28d438732437bcbb2f4ebe44e; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=862152, jitterRate=0.09628380835056305}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:53:25,121 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:25,122 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 9189b0e28d438732437bcbb2f4ebe44e: Running coprocessor pre-open hook at 1731578005101Writing region info on filesystem at 1731578005101Initializing all the Stores at 1731578005104 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578005104Cleaning up temporary data from old regions at 1731578005113 (+9 ms)Running coprocessor post-open hooks at 1731578005121 (+8 ms)Region opened successfully at 1731578005122 (+1 ms) 2024-11-14T09:53:25,124 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e., pid=6, masterSystemTime=1731578005092 2024-11-14T09:53:25,128 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:25,128 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:53:25,129 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=9189b0e28d438732437bcbb2f4ebe44e, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,41071,1731578002473 2024-11-14T09:53:25,133 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9189b0e28d438732437bcbb2f4ebe44e, server=defc576eb6b7,41071,1731578002473 because future has completed 2024-11-14T09:53:25,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:53:25,139 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 9189b0e28d438732437bcbb2f4ebe44e, server=defc576eb6b7,41071,1731578002473 in 199 msec 2024-11-14T09:53:25,144 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:53:25,144 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=9189b0e28d438732437bcbb2f4ebe44e, ASSIGN in 365 msec 2024-11-14T09:53:25,145 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:53:25,146 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578005146"}]},"ts":"1731578005146"} 2024-11-14T09:53:25,149 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-14T09:53:25,151 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:53:25,155 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 485 msec 2024-11-14T09:53:29,842 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-14T09:53:29,897 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:53:29,899 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-14T09:53:32,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:53:32,060 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:53:32,063 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:53:32,063 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T09:53:32,066 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:53:32,066 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:53:32,067 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:53:32,067 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T09:53:34,734 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44945 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:53:34,736 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-14T09:53:34,743 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-14T09:53:34,750 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:53:34,751 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 
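The entries above record the CREATE operation completing and the test scanning hbase:meta to find the table's single region. A small sketch, under the same assumed connection setup, of an equivalent client-side check using Admin.getRegions rather than the test utility's META scan:

```java
import java.util.List;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;

public class ListRegionsSketch {
  public static void main(String[] args) throws Exception {
    TableName name = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Comparable to the "Found 1 regions for table ..." check in the log.
      List<RegionInfo> regions = admin.getRegions(name);
      System.out.println("regions=" + regions.size()
          + " first=" + (regions.isEmpty() ? "n/a" : regions.get(0).getRegionNameAsString()));
    }
  }
}
```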
2024-11-14T09:53:34,752 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578014751 2024-11-14T09:53:34,761 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:34,762 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:34,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:34,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:34,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:34,762 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578003921 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578014751 2024-11-14T09:53:34,765 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:53:34,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741833_1009 (size=451) 2024-11-14T09:53:34,766 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578003921 is not closed yet, will try archiving it next time 2024-11-14T09:53:34,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741833_1009 (size=451) 2024-11-14T09:53:34,772 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578003921 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578003921 2024-11-14T09:53:34,774 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e., hostname=defc576eb6b7,41071,1731578002473, seqNum=2] 2024-11-14T09:53:46,819 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41071 {}] regionserver.HRegion(8855): Flush requested on 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:53:46,825 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9189b0e28d438732437bcbb2f4ebe44e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:53:46,880 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/a4543afd25ca44d3816129ab5616b57f is 1080, key is row0001/info:/1731578014777/Put/seqid=0 2024-11-14T09:53:46,891 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741838_1014 (size=12509) 2024-11-14T09:53:46,891 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741838_1014 (size=12509) 2024-11-14T09:53:46,892 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/a4543afd25ca44d3816129ab5616b57f 2024-11-14T09:53:46,934 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/a4543afd25ca44d3816129ab5616b57f as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f 2024-11-14T09:53:46,945 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T09:53:46,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 129ms, sequenceid=11, compaction requested=false 2024-11-14T09:53:46,953 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9189b0e28d438732437bcbb2f4ebe44e: 2024-11-14T09:53:50,846 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
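The flush above reports its sizes in base-1024 units: 7532 bytes of cell data ("~7.36 KB"), 8304 bytes of heap ("~8.11 KB"), and a resulting HFile a4543afd25ca44d3816129ab5616b57f of 12509 bytes ("12.2 K"). The file is larger than the raw cell data because, beyond the cells themselves, it also carries block index and bloom-filter structures (bloomFilter=true in the flush line). A quick arithmetic check of those conversions, using only the numbers from the log:

// Verifies the unit conversions printed by the flush entries above (base-1024).
public class FlushSizeCheck {
  public static void main(String[] args) {
    long dataBytes  = 7532;   // "dataSize ~7.36 KB/7532"
    long heapBytes  = 8304;   // "heapSize ~8.11 KB/8304"
    long hfileBytes = 12509;  // blk_1073741838_1014 (size=12509), reported as "filesize=12.2 K"
    System.out.printf("data  = %.2f KB%n", dataBytes / 1024.0);   // 7.36
    System.out.printf("heap  = %.2f KB%n", heapBytes / 1024.0);   // 8.11
    System.out.printf("hfile = %.1f K%n",  hfileBytes / 1024.0);  // 12.2
  }
}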
2024-11-14T09:53:54,837 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578034837 2024-11-14T09:53:55,045 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:53:55,046 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:55,046 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:55,046 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:55,046 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:55,046 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:53:55,046 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578014751 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578034837 2024-11-14T09:53:55,047 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41433:41433),(127.0.0.1/127.0.0.1:43623:43623)] 2024-11-14T09:53:55,047 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578014751 is not closed yet, will try archiving it next time 2024-11-14T09:53:55,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741837_1013 (size=12399) 2024-11-14T09:53:55,049 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741837_1013 (size=12399) 2024-11-14T09:53:55,252 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:53:57,461 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:53:59,669 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:01,876 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:01,877 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41071 {}] regionserver.HRegion(8855): Flush requested on 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:54:01,878 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9189b0e28d438732437bcbb2f4ebe44e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:54:02,080 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:02,086 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/86e6946475d14404a8ae850e4ac1184a is 1080, key is row0008/info:/1731578028822/Put/seqid=0 2024-11-14T09:54:02,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741840_1016 (size=12509) 2024-11-14T09:54:02,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741840_1016 (size=12509) 2024-11-14T09:54:02,095 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/86e6946475d14404a8ae850e4ac1184a 2024-11-14T09:54:02,107 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/86e6946475d14404a8ae850e4ac1184a as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a 2024-11-14T09:54:02,117 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a, entries=7, sequenceid=21, filesize=12.2 K 2024-11-14T09:54:02,319 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:02,319 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 
442ms, sequenceid=21, compaction requested=false 2024-11-14T09:54:02,319 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9189b0e28d438732437bcbb2f4ebe44e: 2024-11-14T09:54:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-14T09:54:02,320 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:02,321 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f because midkey is the same as first or last row 2024-11-14T09:54:04,083 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:04,572 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:54:04,573 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T09:54:06,289 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:06,292 WARN [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:06,294 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C41071%2C1731578002473:(num 1731578034837) roll requested 2024-11-14T09:54:06,294 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578046294 2024-11-14T09:54:06,509 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 211 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:06,509 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:06,509 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:06,509 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:06,510 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:06,510 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
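The WARN just above is the count-based roll trigger this test exercises: eight syncs were flagged as slow (each around 200 ms in the preceding entries), which exceeds the count threshold of 5, so the WAL asks the log roller for a fresh file. Later in the log the second trigger shows up as well: a single sync whose cost (5011 ms) crosses a 5000 ms ceiling. A deliberately simplified model of the two checks, using only the numbers printed in these entries; the real AbstractFSWAL logic also tracks check intervals and resets its counter, which this sketch ignores:

// Simplified model of the two roll triggers seen in this log; not the actual
// AbstractFSWAL implementation.
public class SlowSyncRollSketch {
  static boolean rollForRepeatedSlowSyncs(int slowSyncCount, int countThreshold) {
    return slowSyncCount > countThreshold;   // log: count=8, threshold=5 -> roll requested
  }
  static boolean rollForOneVerySlowSync(long syncMillis, long ceilingMillis) {
    return syncMillis >= ceilingMillis;      // log (later): time=5011 ms, threshold=5000 ms -> roll requested
  }
  public static void main(String[] args) {
    System.out.println(rollForRepeatedSlowSyncs(8, 5));      // true
    System.out.println(rollForOneVerySlowSync(5011, 5000));  // true
  }
}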
2024-11-14T09:54:06,510 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578034837 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578046294 2024-11-14T09:54:06,512 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:54:06,512 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578034837 is not closed yet, will try archiving it next time 2024-11-14T09:54:06,512 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578014751 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578014751 2024-11-14T09:54:06,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741839_1015 (size=7739) 2024-11-14T09:54:06,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741839_1015 (size=7739) 2024-11-14T09:54:08,494 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 200 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:10,101 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9189b0e28d438732437bcbb2f4ebe44e, had cached 0 bytes from a total of 25018 2024-11-14T09:54:10,700 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:12,908 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:15,117 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:17,122 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:54:17,123 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578057122 2024-11-14T09:54:20,846 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:54:22,139 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:22,141 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:22,141 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C41071%2C1731578002473:(num 1731578057122) roll requested 2024-11-14T09:54:22,141 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:22,142 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:22,142 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:22,142 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:22,142 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:22,142 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578046294 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578057122 2024-11-14T09:54:22,144 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41433:41433),(127.0.0.1/127.0.0.1:43623:43623)] 2024-11-14T09:54:22,144 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578046294 is not closed yet, will try archiving it next time 2024-11-14T09:54:22,144 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578062144 2024-11-14T09:54:22,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741841_1017 (size=4753) 2024-11-14T09:54:22,145 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741841_1017 (size=4753) 2024-11-14T09:54:27,147 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:27,147 WARN [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:27,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41071 {}] regionserver.HRegion(8855): Flush requested on 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:54:27,148 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9189b0e28d438732437bcbb2f4ebe44e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:54:27,153 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:27,153 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:29,149 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:54:32,152 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:32,152 WARN [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK], DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK]] 2024-11-14T09:54:32,152 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:32,153 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:32,154 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:32,154 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:32,155 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:32,155 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578057122 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578062144 2024-11-14T09:54:32,157 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:54:32,158 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578057122 is not closed yet, will try archiving it next time 2024-11-14T09:54:32,158 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C41071%2C1731578002473:(num 1731578062144) roll requested 2024-11-14T09:54:32,159 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578072158 2024-11-14T09:54:32,159 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741842_1018 (size=1569) 2024-11-14T09:54:32,160 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741842_1018 (size=1569) 2024-11-14T09:54:32,163 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/7b91f9f0574f4f2cbe71ea949c396ad2 is 1080, key is row0015/info:/1731578043880/Put/seqid=0 2024-11-14T09:54:32,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741844_1020 (size=12509) 2024-11-14T09:54:32,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741844_1020 (size=12509) 2024-11-14T09:54:32,170 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/7b91f9f0574f4f2cbe71ea949c396ad2 2024-11-14T09:54:32,181 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/7b91f9f0574f4f2cbe71ea949c396ad2 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2 2024-11-14T09:54:32,191 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2, entries=7, sequenceid=31, filesize=12.2 K 2024-11-14T09:54:37,175 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:37,175 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:37,195 INFO [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5003 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:37,195 WARN [FSHLog-0-hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea-prefix:defc576eb6b7,41071,1731578002473 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5003 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39915,DS-5ad4964d-53db-4b9e-8e46-6e5708a3d0fd,DISK], DatanodeInfoWithStorage[127.0.0.1:42831,DS-c59de04a-ff5d-4454-8720-f15909fc82a3,DISK]] 2024-11-14T09:54:37,195 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,196 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 10048ms, sequenceid=31, compaction requested=true 2024-11-14T09:54:37,196 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,196 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9189b0e28d438732437bcbb2f4ebe44e: 2024-11-14T09:54:37,196 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,196 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,197 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-14T09:54:37,197 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,197 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:37,197 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578062144 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578072158 2024-11-14T09:54:37,197 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f because midkey is the same as first or last row 2024-11-14T09:54:37,199 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:54:37,200 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578062144 is not closed yet, will try archiving it next time 2024-11-14T09:54:37,200 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578034837 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578034837 2024-11-14T09:54:37,200 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C41071%2C1731578002473:(num 1731578077200) roll requested 2024-11-14T09:54:37,200 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 9189b0e28d438732437bcbb2f4ebe44e:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:54:37,201 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578077200 2024-11-14T09:54:37,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741843_1019 (size=438) 2024-11-14T09:54:37,202 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741843_1019 (size=438) 2024-11-14T09:54:37,203 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578046294 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578046294 2024-11-14T09:54:37,203 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:54:37,204 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:54:37,205 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578057122 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578057122 2024-11-14T09:54:37,206 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578062144 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578062144 2024-11-14T09:54:37,207 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations 
with 1 in ratio 2024-11-14T09:54:37,209 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HStore(1541): 9189b0e28d438732437bcbb2f4ebe44e/info is initiating minor compaction (all files) 2024-11-14T09:54:37,209 INFO [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9189b0e28d438732437bcbb2f4ebe44e/info in TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:54:37,210 INFO [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2] into tmpdir=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp, totalSize=36.6 K 2024-11-14T09:54:37,211 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] compactions.Compactor(225): Compacting a4543afd25ca44d3816129ab5616b57f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731578014777 2024-11-14T09:54:37,212 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] compactions.Compactor(225): Compacting 86e6946475d14404a8ae850e4ac1184a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1731578028822 2024-11-14T09:54:37,213 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7b91f9f0574f4f2cbe71ea949c396ad2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1731578043880 2024-11-14T09:54:37,215 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,215 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,215 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,215 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,215 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578072158 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578077200 2024-11-14T09:54:37,216 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41433:41433),(127.0.0.1/127.0.0.1:43623:43623)] 2024-11-14T09:54:37,216 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578072158 is not closed yet, will try archiving it next time 2024-11-14T09:54:37,216 INFO 
[regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41071%2C1731578002473.1731578077216 2024-11-14T09:54:37,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741845_1021 (size=93) 2024-11-14T09:54:37,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741845_1021 (size=93) 2024-11-14T09:54:37,219 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578072158 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs/defc576eb6b7%2C41071%2C1731578002473.1731578072158 2024-11-14T09:54:37,224 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,224 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,224 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,225 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,225 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:37,225 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578077200 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578077216 2024-11-14T09:54:37,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741846_1022 (size=1258) 2024-11-14T09:54:37,227 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741846_1022 (size=1258) 2024-11-14T09:54:37,228 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:43623:43623),(127.0.0.1/127.0.0.1:41433:41433)] 2024-11-14T09:54:37,228 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/WALs/defc576eb6b7,41071,1731578002473/defc576eb6b7%2C41071%2C1731578002473.1731578077200 is not closed yet, will try archiving it next time 2024-11-14T09:54:37,242 INFO [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9189b0e28d438732437bcbb2f4ebe44e#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:54:37,243 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/9140584e56db4c62b8a7fb67ed34a297 is 1080, key is row0001/info:/1731578014777/Put/seqid=0 2024-11-14T09:54:37,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741848_1024 (size=27710) 2024-11-14T09:54:37,249 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741848_1024 (size=27710) 2024-11-14T09:54:37,261 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/9140584e56db4c62b8a7fb67ed34a297 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/9140584e56db4c62b8a7fb67ed34a297 2024-11-14T09:54:37,278 INFO [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 9189b0e28d438732437bcbb2f4ebe44e/info of 9189b0e28d438732437bcbb2f4ebe44e into 9140584e56db4c62b8a7fb67ed34a297(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:54:37,278 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9189b0e28d438732437bcbb2f4ebe44e: 2024-11-14T09:54:37,280 INFO [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e., storeName=9189b0e28d438732437bcbb2f4ebe44e/info, priority=13, startTime=1731578077200; duration=0sec 2024-11-14T09:54:37,280 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:54:37,280 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:37,280 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/9140584e56db4c62b8a7fb67ed34a297 because midkey is the same as first or last row 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/9140584e56db4c62b8a7fb67ed34a297 because midkey is the same as first or last row 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/9140584e56db4c62b8a7fb67ed34a297 because midkey is the same as first or last row 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:54:37,281 DEBUG [RS:0;defc576eb6b7:41071-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9189b0e28d438732437bcbb2f4ebe44e:info 2024-11-14T09:54:49,254 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41071 {}] regionserver.HRegion(8855): Flush requested on 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:54:49,255 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 9189b0e28d438732437bcbb2f4ebe44e 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:54:49,262 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f3a194b6999042dc95196b39583fdab3 is 1080, key is row0022/info:/1731578077218/Put/seqid=0 2024-11-14T09:54:49,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741849_1025 (size=12509) 2024-11-14T09:54:49,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741849_1025 (size=12509) 2024-11-14T09:54:49,268 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f3a194b6999042dc95196b39583fdab3 2024-11-14T09:54:49,278 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f3a194b6999042dc95196b39583fdab3 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/f3a194b6999042dc95196b39583fdab3 2024-11-14T09:54:49,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/f3a194b6999042dc95196b39583fdab3, entries=7, sequenceid=42, filesize=12.2 K 2024-11-14T09:54:49,288 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 34ms, sequenceid=42, compaction requested=false 2024-11-14T09:54:49,288 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 9189b0e28d438732437bcbb2f4ebe44e: 2024-11-14T09:54:49,288 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-14T09:54:49,288 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:54:49,288 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/9140584e56db4c62b8a7fb67ed34a297 because midkey is the same as first or last row 2024-11-14T09:54:50,847 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:54:55,102 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 9189b0e28d438732437bcbb2f4ebe44e, had cached 0 bytes from a total of 40219 2024-11-14T09:54:57,269 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:54:57,269 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
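The split-policy lines above line up with the store's current files: the 3-way minor compaction earlier merged 3 × 12509 = 37527 bytes ("36.6 K") into 9140584e56db4c62b8a7fb67ed34a297 at 27710 bytes ("27.1 K"), and the latest flush added f3a194b6999042dc95196b39583fdab3 at 12509 bytes, giving 40219 bytes ≈ 39.3 K — the same total the metrics wrapper reports ("a total of 40219"). The size check therefore says the region is big enough to split (sumSize above sizeToCheck=16.0 K), but StoreUtils refuses because the candidate file's midkey equals its first or last row, so no usable split point exists. A toy version of that two-stage decision using the logged sizes (not the actual ConstantSizeRegionSplitPolicy/StoreUtils code; 16.0 K is assumed here to be exactly 16 * 1024 bytes):

// Arithmetic behind the sumSize above, plus a toy two-stage split decision.
public class SplitCheckSketch {
  public static void main(String[] args) {
    long compactedHFile = 27710;                         // 9140584e... (compaction output, "27.1 K")
    long latestFlush    = 12509;                         // f3a194b6... ("12.2 K")
    long sumSize = compactedHFile + latestFlush;         // 40219 bytes
    long sizeToCheck = 16 * 1024;                        // "sizeToCheck=16.0 K" (assumed exact)
    boolean bigEnough = sumSize > sizeToCheck;           // true  -> "Should split ..."
    boolean midkeyUsable = false;                        // log: midkey is the same as first or last row
    boolean willSplit = bigEnough && midkeyUsable;       // false -> region stays unsplit
    System.out.printf("sumSize=%.1f K bigEnough=%b willSplit=%b%n",
        sumSize / 1024.0, bigEnough, willSplit);         // sumSize=39.3 K bigEnough=true willSplit=false
  }
}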
2024-11-14T09:54:57,269 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:54:57,274 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:54:57,275 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:54:57,275 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
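The long call stack above is simply the JUnit 4 tear-down path: RunAfters invokes AbstractTestLogRolling.tearDown, which calls HBaseTestingUtil.shutdownMiniCluster(); that first closes the shared async connection (the AsyncConnectionImpl.close frames and the "Stopping rpc client" lines) and then shuts the mini cluster down. A hedged sketch of what such a tear-down looks like; the TEST_UTIL field name is a placeholder chosen for this sketch, not taken from the log:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;

// Sketch only: mirrors the frames in the stack trace above
// (tearDown -> HBaseTestingUtil.shutdownMiniCluster). The utility instance is assumed
// to have been created in the test's setup, which this sketch does not reproduce.
public abstract class TearDownSketch {
  protected static HBaseTestingUtil TEST_UTIL;  // placeholder name, assigned elsewhere in setup

  @After
  public void tearDown() throws Exception {
    TEST_UTIL.shutdownMiniCluster();  // stops the mini HBase cluster and its backing services
  }
}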
2024-11-14T09:54:57,275 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:54:57,275 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=233511752, stopped=false 2024-11-14T09:54:57,275 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,44945,1731578001707 2024-11-14T09:54:57,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:54:57,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:54:57,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:54:57,296 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:54:57,297 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:54:57,297 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:54:57,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:54:57,297 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:54:57,297 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at 
java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:54:57,297 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:54:57,298 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,41071,1731578002473' ***** 2024-11-14T09:54:57,298 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:54:57,298 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:54:57,298 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:54:57,298 INFO [RS:0;defc576eb6b7:41071 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:54:57,298 INFO [RS:0;defc576eb6b7:41071 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:54:57,299 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(3091): Received CLOSE for 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:54:57,299 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,41071,1731578002473 2024-11-14T09:54:57,299 INFO [RS:0;defc576eb6b7:41071 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:54:57,299 INFO [RS:0;defc576eb6b7:41071 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:41071. 
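The NodeDeleted events on /hbase/running a few entries back are how the shutdown request fans out: the master removes the cluster-status znode, and both the master:44945 and regionserver:41071 watchers see the deletion and begin stopping, which is where the "STOPPING region server" lines above come from. A minimal standalone sketch with the plain ZooKeeper client (not HBase's ZKWatcher) of observing that deletion; the quorum address 127.0.0.1:58969 is the one in the log, while the 30-second session timeout and the sleep are arbitrary choices for the sketch:

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

// Watches /hbase/running with the raw ZooKeeper API and reports when it is deleted.
public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    final String znode = "/hbase/running";
    ZooKeeper zk = new ZooKeeper("127.0.0.1:58969", 30_000, event -> { });
    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted && znode.equals(event.getPath())) {
        System.out.println("cluster shutdown requested: " + znode + " was deleted");
      }
    };
    // exists() registers the watch whether or not the znode is currently present,
    // matching the "Set watcher on znode that does not yet exist" lines in the log.
    zk.exists(znode, watcher);
    Thread.sleep(60_000);  // keep the session alive long enough to see the event
    zk.close();
  }
}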
2024-11-14T09:54:57,299 DEBUG [RS:0;defc576eb6b7:41071 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:54:57,299 DEBUG [RS:0;defc576eb6b7:41071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:54:57,300 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:54:57,300 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9189b0e28d438732437bcbb2f4ebe44e, disabling compactions & flushes 2024-11-14T09:54:57,300 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:54:57,300 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:54:57,300 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. after waiting 0 ms 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 
2024-11-14T09:54:57,300 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:54:57,300 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1325): Online Regions={9189b0e28d438732437bcbb2f4ebe44e=TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:54:57,300 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 9189b0e28d438732437bcbb2f4ebe44e 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-14T09:54:57,300 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:54:57,300 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:54:57,301 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:54:57,301 DEBUG [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9189b0e28d438732437bcbb2f4ebe44e 2024-11-14T09:54:57,301 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-14T09:54:57,307 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f2451ba879e84cd3a66cf5b454eb5784 is 1080, key is row0029/info:/1731578091258/Put/seqid=0 2024-11-14T09:54:57,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741850_1026 (size=8193) 2024-11-14T09:54:57,314 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741850_1026 (size=8193) 2024-11-14T09:54:57,315 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f2451ba879e84cd3a66cf5b454eb5784 2024-11-14T09:54:57,322 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/info/3de731e2c9944b3c96575207533eb87a is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e./info:regioninfo/1731578005129/Put/seqid=0 2024-11-14T09:54:57,325 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/.tmp/info/f2451ba879e84cd3a66cf5b454eb5784 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/f2451ba879e84cd3a66cf5b454eb5784 2024-11-14T09:54:57,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741851_1027 (size=7016) 2024-11-14T09:54:57,330 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741851_1027 (size=7016) 2024-11-14T09:54:57,331 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/info/3de731e2c9944b3c96575207533eb87a 2024-11-14T09:54:57,335 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/f2451ba879e84cd3a66cf5b454eb5784, entries=3, sequenceid=48, filesize=8.0 K 2024-11-14T09:54:57,337 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 37ms, sequenceid=48, compaction requested=true 2024-11-14T09:54:57,338 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2] to archive 2024-11-14T09:54:57,341 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-14T09:54:57,344 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/a4543afd25ca44d3816129ab5616b57f 2024-11-14T09:54:57,346 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/86e6946475d14404a8ae850e4ac1184a 2024-11-14T09:54:57,348 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2 to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/archive/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/info/7b91f9f0574f4f2cbe71ea949c396ad2 2024-11-14T09:54:57,355 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/ns/12f29f4d07074a718f6c18f1b6a1e6c6 is 43, key is default/ns:d/1731578004464/Put/seqid=0 2024-11-14T09:54:57,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741852_1028 (size=5153) 2024-11-14T09:54:57,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741852_1028 (size=5153) 2024-11-14T09:54:57,362 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/ns/12f29f4d07074a718f6c18f1b6a1e6c6 2024-11-14T09:54:57,363 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=defc576eb6b7:44945 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-14T09:54:57,368 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [a4543afd25ca44d3816129ab5616b57f=12509, 86e6946475d14404a8ae850e4ac1184a=12509, 7b91f9f0574f4f2cbe71ea949c396ad2=12509] 2024-11-14T09:54:57,374 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/default/TestLogRolling-testSlowSyncLogRolling/9189b0e28d438732437bcbb2f4ebe44e/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-14T09:54:57,376 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 2024-11-14T09:54:57,376 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9189b0e28d438732437bcbb2f4ebe44e: Waiting for close lock at 1731578097299Running coprocessor pre-close hooks at 1731578097300 (+1 ms)Disabling compacts and flushes for region at 1731578097300Disabling writes for close at 1731578097300Obtaining lock to block concurrent updates at 1731578097300Preparing flush snapshotting stores in 9189b0e28d438732437bcbb2f4ebe44e at 1731578097300Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1731578097301 (+1 ms)Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. at 1731578097302 (+1 ms)Flushing 9189b0e28d438732437bcbb2f4ebe44e/info: creating writer at 1731578097302Flushing 9189b0e28d438732437bcbb2f4ebe44e/info: appending metadata at 1731578097307 (+5 ms)Flushing 9189b0e28d438732437bcbb2f4ebe44e/info: closing flushed file at 1731578097307Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@42c1e757: reopening flushed file at 1731578097324 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 9189b0e28d438732437bcbb2f4ebe44e in 37ms, sequenceid=48, compaction requested=true at 1731578097337 (+13 ms)Writing region close event to WAL at 1731578097369 (+32 ms)Running coprocessor post-close hooks at 1731578097374 (+5 ms)Closed at 1731578097376 (+2 ms) 2024-11-14T09:54:57,377 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1731578004660.9189b0e28d438732437bcbb2f4ebe44e. 
2024-11-14T09:54:57,385 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/table/d870dd5cf92c4e138355bc9239846391 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1731578005146/Put/seqid=0 2024-11-14T09:54:57,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741853_1029 (size=5396) 2024-11-14T09:54:57,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741853_1029 (size=5396) 2024-11-14T09:54:57,392 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/table/d870dd5cf92c4e138355bc9239846391 2024-11-14T09:54:57,401 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/info/3de731e2c9944b3c96575207533eb87a as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/info/3de731e2c9944b3c96575207533eb87a 2024-11-14T09:54:57,410 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/info/3de731e2c9944b3c96575207533eb87a, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T09:54:57,412 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/ns/12f29f4d07074a718f6c18f1b6a1e6c6 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/ns/12f29f4d07074a718f6c18f1b6a1e6c6 2024-11-14T09:54:57,421 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/ns/12f29f4d07074a718f6c18f1b6a1e6c6, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:54:57,423 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/.tmp/table/d870dd5cf92c4e138355bc9239846391 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/table/d870dd5cf92c4e138355bc9239846391 2024-11-14T09:54:57,433 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/table/d870dd5cf92c4e138355bc9239846391, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T09:54:57,435 INFO 
[RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-11-14T09:54:57,442 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:54:57,442 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:54:57,443 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:54:57,443 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578097300Running coprocessor pre-close hooks at 1731578097300Disabling compacts and flushes for region at 1731578097300Disabling writes for close at 1731578097300Obtaining lock to block concurrent updates at 1731578097301 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731578097301Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1731578097301Flushing stores of hbase:meta,,1.1588230740 at 1731578097302 (+1 ms)Flushing 1588230740/info: creating writer at 1731578097302Flushing 1588230740/info: appending metadata at 1731578097322 (+20 ms)Flushing 1588230740/info: closing flushed file at 1731578097322Flushing 1588230740/ns: creating writer at 1731578097340 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731578097355 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731578097355Flushing 1588230740/table: creating writer at 1731578097369 (+14 ms)Flushing 1588230740/table: appending metadata at 1731578097385 (+16 ms)Flushing 1588230740/table: closing flushed file at 1731578097385Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4728e24f: reopening flushed file at 1731578097400 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@702529c6: reopening flushed file at 1731578097410 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@129a0f6b: reopening flushed file at 1731578097421 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1731578097435 (+14 ms)Writing region close event to WAL at 1731578097436 (+1 ms)Running coprocessor post-close hooks at 1731578097442 (+6 ms)Closed at 1731578097443 (+1 ms) 2024-11-14T09:54:57,443 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:54:57,501 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,41071,1731578002473; all regions closed. 
2024-11-14T09:54:57,503 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,503 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,503 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,503 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,503 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,506 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741834_1010 (size=3066) 2024-11-14T09:54:57,507 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741834_1010 (size=3066) 2024-11-14T09:54:57,510 DEBUG [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs 2024-11-14T09:54:57,510 INFO [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C41071%2C1731578002473.meta:.meta(num 1731578004305) 2024-11-14T09:54:57,511 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,511 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,511 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,511 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,511 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741847_1023 (size=12695) 2024-11-14T09:54:57,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741847_1023 (size=12695) 2024-11-14T09:54:57,520 DEBUG [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/oldWALs 2024-11-14T09:54:57,520 INFO [RS:0;defc576eb6b7:41071 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C41071%2C1731578002473:(num 1731578077216) 2024-11-14T09:54:57,520 DEBUG [RS:0;defc576eb6b7:41071 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:54:57,521 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:54:57,521 INFO [RS:0;defc576eb6b7:41071 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:54:57,521 INFO [RS:0;defc576eb6b7:41071 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:54:57,521 INFO [RS:0;defc576eb6b7:41071 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:54:57,521 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
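[Editor's note] For context on the WAL housekeeping above (the FSHLog instances being closed and their files moved to oldWALs): besides the slow-sync trigger this test exercises, a WAL roll can also be requested explicitly through the client Admin API. The following is a generic, hedged sketch of that call, not code from TestLogRolling; the class name is hypothetical and the configuration and server selection are placeholders.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

// Generic sketch (an assumption, not taken from the test): ask each region
// server to roll its write-ahead log. Rolled files are later archived under
// oldWALs, as in the "Moved N WAL file(s) to .../oldWALs" entries above.
public final class RollWalSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create(); // placeholder config
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      for (ServerName sn : admin.getRegionServers()) {
        admin.rollWALWriter(sn); // request a WAL roll on that server
      }
    }
  }
}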
2024-11-14T09:54:57,522 INFO [RS:0;defc576eb6b7:41071 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41071 2024-11-14T09:54:57,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:54:57,541 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,41071,1731578002473 2024-11-14T09:54:57,541 INFO [RS:0;defc576eb6b7:41071 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:54:57,552 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,41071,1731578002473] 2024-11-14T09:54:57,562 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,41071,1731578002473 already deleted, retry=false 2024-11-14T09:54:57,563 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,41071,1731578002473 expired; onlineServers=0 2024-11-14T09:54:57,563 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,44945,1731578001707' ***** 2024-11-14T09:54:57,563 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:54:57,563 INFO [M:0;defc576eb6b7:44945 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:54:57,563 INFO [M:0;defc576eb6b7:44945 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:54:57,563 DEBUG [M:0;defc576eb6b7:44945 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:54:57,563 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:54:57,563 DEBUG [M:0;defc576eb6b7:44945 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:54:57,563 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578003574 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578003574,5,FailOnTimeoutGroup] 2024-11-14T09:54:57,563 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578003573 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578003573,5,FailOnTimeoutGroup] 2024-11-14T09:54:57,564 INFO [M:0;defc576eb6b7:44945 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:54:57,564 INFO [M:0;defc576eb6b7:44945 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:54:57,564 DEBUG [M:0;defc576eb6b7:44945 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:54:57,564 INFO [M:0;defc576eb6b7:44945 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:54:57,564 INFO [M:0;defc576eb6b7:44945 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:54:57,565 INFO [M:0;defc576eb6b7:44945 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:54:57,565 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:54:57,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:54:57,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:54:57,573 DEBUG [M:0;defc576eb6b7:44945 {}] zookeeper.ZKUtil(347): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:54:57,573 WARN [M:0;defc576eb6b7:44945 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:54:57,574 INFO [M:0;defc576eb6b7:44945 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/.lastflushedseqids 2024-11-14T09:54:57,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741854_1030 (size=130) 2024-11-14T09:54:57,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741854_1030 (size=130) 2024-11-14T09:54:57,585 INFO [M:0;defc576eb6b7:44945 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:54:57,586 INFO [M:0;defc576eb6b7:44945 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:54:57,586 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:54:57,586 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:54:57,586 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:54:57,586 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:54:57,586 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:54:57,586 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-14T09:54:57,608 DEBUG [M:0;defc576eb6b7:44945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bebff486712423fa6983efef9a8279e is 82, key is hbase:meta,,1/info:regioninfo/1731578004369/Put/seqid=0 2024-11-14T09:54:57,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741855_1031 (size=5672) 2024-11-14T09:54:57,625 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741855_1031 (size=5672) 2024-11-14T09:54:57,628 INFO [M:0;defc576eb6b7:44945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bebff486712423fa6983efef9a8279e 2024-11-14T09:54:57,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:54:57,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41071-0x10138c371ae0001, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:54:57,652 INFO [RS:0;defc576eb6b7:41071 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:54:57,653 INFO [RS:0;defc576eb6b7:41071 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,41071,1731578002473; zookeeper connection closed. 
2024-11-14T09:54:57,653 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6bb305ea {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6bb305ea 2024-11-14T09:54:57,654 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:54:57,655 DEBUG [M:0;defc576eb6b7:44945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/735f659ee4f246afb42844fad17b214b is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731578005153/Put/seqid=0 2024-11-14T09:54:57,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741856_1032 (size=6247) 2024-11-14T09:54:57,665 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741856_1032 (size=6247) 2024-11-14T09:54:57,666 INFO [M:0;defc576eb6b7:44945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/735f659ee4f246afb42844fad17b214b 2024-11-14T09:54:57,673 INFO [M:0;defc576eb6b7:44945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 735f659ee4f246afb42844fad17b214b 2024-11-14T09:54:57,695 DEBUG [M:0;defc576eb6b7:44945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859be54cbfca459faafa45838c420301 is 69, key is defc576eb6b7,41071,1731578002473/rs:state/1731578003697/Put/seqid=0 2024-11-14T09:54:57,702 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741857_1033 (size=5156) 2024-11-14T09:54:57,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741857_1033 (size=5156) 2024-11-14T09:54:57,703 INFO [M:0;defc576eb6b7:44945 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859be54cbfca459faafa45838c420301 2024-11-14T09:54:57,729 DEBUG [M:0;defc576eb6b7:44945 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcecbeb28c3744fe942631e29dd640f9 is 52, key is load_balancer_on/state:d/1731578004641/Put/seqid=0 2024-11-14T09:54:57,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741858_1034 (size=5056) 2024-11-14T09:54:57,750 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741858_1034 (size=5056) 2024-11-14T09:54:57,751 INFO [M:0;defc576eb6b7:44945 {}] regionserver.DefaultStoreFlusher(81): Flushed 
memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcecbeb28c3744fe942631e29dd640f9 2024-11-14T09:54:57,760 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5bebff486712423fa6983efef9a8279e as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5bebff486712423fa6983efef9a8279e 2024-11-14T09:54:57,768 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5bebff486712423fa6983efef9a8279e, entries=8, sequenceid=59, filesize=5.5 K 2024-11-14T09:54:57,769 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/735f659ee4f246afb42844fad17b214b as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/735f659ee4f246afb42844fad17b214b 2024-11-14T09:54:57,775 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:54:57,778 INFO [M:0;defc576eb6b7:44945 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 735f659ee4f246afb42844fad17b214b 2024-11-14T09:54:57,778 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/735f659ee4f246afb42844fad17b214b, entries=6, sequenceid=59, filesize=6.1 K 2024-11-14T09:54:57,780 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/859be54cbfca459faafa45838c420301 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/859be54cbfca459faafa45838c420301 2024-11-14T09:54:57,788 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/859be54cbfca459faafa45838c420301, entries=1, sequenceid=59, filesize=5.0 K 2024-11-14T09:54:57,789 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/dcecbeb28c3744fe942631e29dd640f9 as hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dcecbeb28c3744fe942631e29dd640f9 2024-11-14T09:54:57,797 INFO [M:0;defc576eb6b7:44945 {}] 
regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/dcecbeb28c3744fe942631e29dd640f9, entries=1, sequenceid=59, filesize=4.9 K 2024-11-14T09:54:57,798 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 212ms, sequenceid=59, compaction requested=false 2024-11-14T09:54:57,800 INFO [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:54:57,800 DEBUG [M:0;defc576eb6b7:44945 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578097586Disabling compacts and flushes for region at 1731578097586Disabling writes for close at 1731578097586Obtaining lock to block concurrent updates at 1731578097586Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578097586Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1731578097587 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578097588 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578097588Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578097608 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578097608Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578097638 (+30 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578097654 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578097654Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578097674 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578097694 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578097694Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578097711 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578097728 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578097728Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4628c71c: reopening flushed file at 1731578097758 (+30 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@621323ee: reopening flushed file at 1731578097768 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59c84d3a: reopening flushed file at 1731578097779 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32e542ae: reopening flushed file at 1731578097788 (+9 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 212ms, sequenceid=59, compaction requested=false at 1731578097798 (+10 ms)Writing region close event to WAL at 1731578097799 (+1 ms)Closed at 1731578097800 (+1 ms) 2024-11-14T09:54:57,801 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,801 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,801 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,801 INFO 
[sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,801 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:54:57,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39915 is added to blk_1073741830_1006 (size=27973) 2024-11-14T09:54:57,804 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42831 is added to blk_1073741830_1006 (size=27973) 2024-11-14T09:54:57,804 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:54:57,805 INFO [M:0;defc576eb6b7:44945 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:54:57,805 INFO [M:0;defc576eb6b7:44945 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44945 2024-11-14T09:54:57,805 INFO [M:0;defc576eb6b7:44945 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:54:57,915 INFO [M:0;defc576eb6b7:44945 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:54:57,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:54:57,915 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44945-0x10138c371ae0000, quorum=127.0.0.1:58969, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:54:57,919 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:54:57,922 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:54:57,922 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:54:57,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:54:57,922 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,STOPPED} 2024-11-14T09:54:57,925 WARN [BP-1836200132-172.17.0.2-1731577997431 heartbeating to localhost/127.0.0.1:38163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:54:57,925 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:54:57,925 WARN [BP-1836200132-172.17.0.2-1731577997431 heartbeating to localhost/127.0.0.1:38163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1836200132-172.17.0.2-1731577997431 (Datanode Uuid 61e601ca-99d9-402a-b885-0f1442433071) service to localhost/127.0.0.1:38163 2024-11-14T09:54:57,925 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:54:57,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data3/current/BP-1836200132-172.17.0.2-1731577997431 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:54:57,927 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data4/current/BP-1836200132-172.17.0.2-1731577997431 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:54:57,928 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:54:57,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:54:57,932 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:54:57,932 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:54:57,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:54:57,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,STOPPED} 2024-11-14T09:54:57,934 WARN [BP-1836200132-172.17.0.2-1731577997431 heartbeating to localhost/127.0.0.1:38163 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:54:57,934 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:54:57,934 WARN [BP-1836200132-172.17.0.2-1731577997431 heartbeating to localhost/127.0.0.1:38163 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1836200132-172.17.0.2-1731577997431 (Datanode Uuid 2c6065cb-6358-4129-b2a1-56867e111389) service to localhost/127.0.0.1:38163 2024-11-14T09:54:57,934 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:54:57,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data1/current/BP-1836200132-172.17.0.2-1731577997431 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:54:57,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/cluster_055b7eb2-cfd8-bfc1-0e31-6ade23e6eca6/data/data2/current/BP-1836200132-172.17.0.2-1731577997431 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:54:57,936 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:54:57,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:54:57,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:54:57,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:54:57,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:54:57,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir/,STOPPED} 2024-11-14T09:54:57,962 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:54:57,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:54:58,000 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: LeaseRenewer:jenkins@localhost:38163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/defc576eb6b7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
org.apache.hadoop.hdfs.PeerCache@69147b9 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:38163 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38163 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:38163 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38163 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/defc576eb6b7:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: regionserver/defc576eb6b7:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native 
Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=115 (was 243), ProcessCount=11 (was 11), AvailableMemoryMB=5308 (was 5047) - AvailableMemoryMB LEAK? - 2024-11-14T09:54:58,006 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=115, ProcessCount=11, AvailableMemoryMB=5308 2024-11-14T09:54:58,006 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.log.dir so I do NOT create it in target/test-data/c97a262e-5406-ce1b-5311-18181e68405d 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/1c0f5b6b-b60b-2517-6790-9258cf58a490/hadoop.tmp.dir so I do NOT create it in target/test-data/c97a262e-5406-ce1b-5311-18181e68405d 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6, deleteOnExit=true 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/test.cache.data in system properties and HBase conf 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:54:58,007 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:54:58,008 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:54:58,008 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:54:58,009 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:54:58,021 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:54:58,721 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:54:58,730 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:54:58,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:54:58,735 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:54:58,735 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:54:58,735 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:54:58,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ea6e47a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:54:58,736 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@124e4130{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:54:58,833 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6db7bfac{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/java.io.tmpdir/jetty-localhost-38553-hadoop-hdfs-3_4_1-tests_jar-_-any-5225145646318840498/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:54:58,834 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f2bc681{HTTP/1.1, (http/1.1)}{localhost:38553} 2024-11-14T09:54:58,834 INFO [Time-limited test {}] server.Server(415): Started @103469ms 2024-11-14T09:54:58,847 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:54:59,293 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:54:59,299 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:54:59,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:54:59,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:54:59,301 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:54:59,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:54:59,302 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:54:59,403 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e335929{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/java.io.tmpdir/jetty-localhost-33261-hadoop-hdfs-3_4_1-tests_jar-_-any-4982725726714186298/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:54:59,404 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:33261} 2024-11-14T09:54:59,404 INFO [Time-limited test {}] server.Server(415): Started @104039ms 2024-11-14T09:54:59,406 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:54:59,464 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:54:59,472 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:54:59,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:54:59,475 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:54:59,475 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:54:59,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:54:59,476 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:54:59,580 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11ff445e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/java.io.tmpdir/jetty-localhost-35823-hadoop-hdfs-3_4_1-tests_jar-_-any-2620956692027013045/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:54:59,580 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:35823} 2024-11-14T09:54:59,580 INFO [Time-limited test {}] server.Server(415): Started @104215ms 2024-11-14T09:54:59,582 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:01,078 WARN [Thread-453 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data1/current/BP-454442519-172.17.0.2-1731578098036/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:01,078 WARN [Thread-454 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data2/current/BP-454442519-172.17.0.2-1731578098036/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:01,099 WARN [Thread-417 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54eb48a48c0ff81d with lease ID 0xbee6a729611efc2: Processing first storage report for DS-4b58eddf-0c6b-4818-acfa-47391dfc6052 from datanode DatanodeRegistration(127.0.0.1:46247, datanodeUuid=7e2bf6c9-c644-4a8a-a112-87eb21c01ad4, infoPort=46409, infoSecurePort=0, ipcPort=37621, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036) 2024-11-14T09:55:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54eb48a48c0ff81d with lease ID 0xbee6a729611efc2: from storage DS-4b58eddf-0c6b-4818-acfa-47391dfc6052 node DatanodeRegistration(127.0.0.1:46247, datanodeUuid=7e2bf6c9-c644-4a8a-a112-87eb21c01ad4, infoPort=46409, infoSecurePort=0, ipcPort=37621, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:01,106 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x54eb48a48c0ff81d with lease ID 0xbee6a729611efc2: Processing first storage report for DS-3c1c1096-865b-4aac-8417-3b0706c679f0 from datanode DatanodeRegistration(127.0.0.1:46247, datanodeUuid=7e2bf6c9-c644-4a8a-a112-87eb21c01ad4, infoPort=46409, infoSecurePort=0, ipcPort=37621, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036) 2024-11-14T09:55:01,107 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x54eb48a48c0ff81d with lease ID 0xbee6a729611efc2: from storage DS-3c1c1096-865b-4aac-8417-3b0706c679f0 node DatanodeRegistration(127.0.0.1:46247, datanodeUuid=7e2bf6c9-c644-4a8a-a112-87eb21c01ad4, infoPort=46409, infoSecurePort=0, ipcPort=37621, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:01,184 WARN [Thread-464 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data3/current/BP-454442519-172.17.0.2-1731578098036/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:01,184 WARN [Thread-465 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data4/current/BP-454442519-172.17.0.2-1731578098036/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:01,210 WARN [Thread-440 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc057881c048f160b with lease ID 0xbee6a729611efc3: Processing first storage report for DS-603bfe03-1900-4533-bec8-014ec01b2f2b from datanode DatanodeRegistration(127.0.0.1:35423, datanodeUuid=15df0cd2-2780-4303-96c8-a20d7133dd02, infoPort=36969, infoSecurePort=0, ipcPort=40597, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036) 2024-11-14T09:55:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc057881c048f160b with lease ID 0xbee6a729611efc3: from storage DS-603bfe03-1900-4533-bec8-014ec01b2f2b node DatanodeRegistration(127.0.0.1:35423, datanodeUuid=15df0cd2-2780-4303-96c8-a20d7133dd02, infoPort=36969, infoSecurePort=0, ipcPort=40597, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc057881c048f160b with lease ID 0xbee6a729611efc3: Processing first storage report for DS-f21624d5-da4d-4cd1-9607-9d19ab05a2a9 from datanode DatanodeRegistration(127.0.0.1:35423, datanodeUuid=15df0cd2-2780-4303-96c8-a20d7133dd02, infoPort=36969, infoSecurePort=0, ipcPort=40597, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036) 2024-11-14T09:55:01,213 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc057881c048f160b with lease ID 0xbee6a729611efc3: from storage DS-f21624d5-da4d-4cd1-9607-9d19ab05a2a9 node DatanodeRegistration(127.0.0.1:35423, datanodeUuid=15df0cd2-2780-4303-96c8-a20d7133dd02, infoPort=36969, infoSecurePort=0, ipcPort=40597, storageInfo=lv=-57;cid=testClusterID;nsid=234171303;c=1731578098036), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:55:01,227 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d 2024-11-14T09:55:01,240 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/zookeeper_0, clientPort=65279, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:55:01,241 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=65279 2024-11-14T09:55:01,241 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,244 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:01,260 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:01,262 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1 with version=8 2024-11-14T09:55:01,262 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:55:01,266 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:55:01,266 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:01,267 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40449 2024-11-14T09:55:01,269 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40449 connecting to ZooKeeper ensemble=127.0.0.1:65279 2024-11-14T09:55:01,329 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:404490x0, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:01,332 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40449-0x10138c4f9a00000 connected 2024-11-14T09:55:01,416 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,417 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,420 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:01,420 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1, hbase.cluster.distributed=false 2024-11-14T09:55:01,422 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:01,427 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40449 2024-11-14T09:55:01,428 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40449 2024-11-14T09:55:01,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40449 2024-11-14T09:55:01,429 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40449 2024-11-14T09:55:01,430 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40449 2024-11-14T09:55:01,446 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:55:01,447 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:01,448 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45731 2024-11-14T09:55:01,451 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:45731 connecting to ZooKeeper ensemble=127.0.0.1:65279 2024-11-14T09:55:01,452 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,468 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:457310x0, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:01,469 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:457310x0, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:01,469 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:45731-0x10138c4f9a00001 connected 2024-11-14T09:55:01,469 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:55:01,470 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:55:01,470 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:55:01,471 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:01,472 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45731 2024-11-14T09:55:01,472 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45731 2024-11-14T09:55:01,473 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45731 2024-11-14T09:55:01,476 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45731 2024-11-14T09:55:01,477 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45731 2024-11-14T09:55:01,489 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:40449 2024-11-14T09:55:01,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:01,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:01,500 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:55:01,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,511 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:55:01,511 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,40449,1731578101265 from backup master directory 2024-11-14T09:55:01,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:01,521 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
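The ZooKeeper traffic recorded above — the backup-masters znode being created and then deleted, followed by the watch on /hbase/master firing — can be inspected directly against the same quorum this run reports (127.0.0.1:65279, baseZNode=/hbase; the client port is ephemeral and specific to this test run). Below is a minimal, illustrative sketch using the plain ZooKeeper Java client; the connect string and znode paths are taken from the log lines above, while the class name InspectMasterZnode and everything else are hypothetical and not part of the test code.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.ZooKeeper;

    public class InspectMasterZnode {
        public static void main(String[] args) throws Exception {
            // Quorum and base znode as reported by ZKWatcher in the records above.
            ZooKeeper zk = new ZooKeeper("127.0.0.1:65279", 30000,
                    (WatchedEvent e) -> System.out.println("event: " + e));
            // /hbase/master holds the serialized identity of the active master;
            // /hbase/backup-masters lists any standby masters still registered.
            byte[] master = zk.getData("/hbase/master", false, null);
            System.out.println("active master znode is " + master.length + " bytes");
            System.out.println("backup masters: " + zk.getChildren("/hbase/backup-masters", false));
            zk.close();
        }
    }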
2024-11-14T09:55:01,521 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:01,521 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,525 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/hbase.id] with ID: a09b9af9-f33a-431f-86b6-6bf5e4ee2861 2024-11-14T09:55:01,526 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/.tmp/hbase.id 2024-11-14T09:55:01,535 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:01,536 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:01,536 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/.tmp/hbase.id]:[hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/hbase.id] 2024-11-14T09:55:01,550 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:01,550 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:55:01,552 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T09:55:01,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,563 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:01,569 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:01,571 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:55:01,572 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:55:01,572 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:01,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:01,582 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store 2024-11-14T09:55:01,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:01,589 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:01,590 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:01,590 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:55:01,590 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:01,590 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:01,590 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:55:01,590 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:01,591 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
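Everything from the HBaseTestingUtil property setup through the master:store bootstrap above is produced by a single mini-cluster bring-up in the test harness. The following is a rough sketch of driving that same bring-up, assuming HBaseTestingUtil exposes the startMiniCluster/shutdownMiniCluster/getConfiguration methods of the older HBaseTestingUtility; the class name MiniClusterSketch is hypothetical, and the two configuration keys printed (hbase.rootdir, hbase.cluster.distributed) are the ones the log itself reports.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseTestingUtil;

    public class MiniClusterSketch {
        public static void main(String[] args) throws Exception {
            HBaseTestingUtil util = new HBaseTestingUtil();
            // Brings up the stack seen in the log: a mini DFS cluster, a
            // MiniZooKeeperCluster on an ephemeral client port, one HMaster and
            // one HRegionServer, all rooted under a per-run test-data directory.
            util.startMiniCluster();

            Configuration conf = util.getConfiguration();
            System.out.println("hbase.rootdir = " + conf.get("hbase.rootdir"));
            System.out.println("hbase.cluster.distributed = " + conf.get("hbase.cluster.distributed"));

            util.shutdownMiniCluster();
        }
    }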
2024-11-14T09:55:01,591 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578101590Disabling compacts and flushes for region at 1731578101590Disabling writes for close at 1731578101590Writing region close event to WAL at 1731578101590Closed at 1731578101590 2024-11-14T09:55:01,592 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/.initializing 2024-11-14T09:55:01,592 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/WALs/defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,596 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C40449%2C1731578101265, suffix=, logDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/WALs/defc576eb6b7,40449,1731578101265, archiveDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/oldWALs, maxLogs=10 2024-11-14T09:55:01,597 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C40449%2C1731578101265.1731578101596 2024-11-14T09:55:01,603 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/WALs/defc576eb6b7,40449,1731578101265/defc576eb6b7%2C40449%2C1731578101265.1731578101596 2024-11-14T09:55:01,604 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36969:36969),(127.0.0.1/127.0.0.1:46409:46409)] 2024-11-14T09:55:01,604 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:01,605 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:01,605 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,605 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,607 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:55:01,610 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:01,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:55:01,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:01,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:55:01,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:01,619 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:55:01,621 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,621 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:01,622 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,623 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,623 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,624 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,625 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,625 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:55:01,627 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:01,629 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:01,630 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825848, jitterRate=0.050121158361434937}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:55:01,631 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578101605Initializing all the Stores at 1731578101607 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578101607Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578101607Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578101607Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578101607Cleaning up temporary data from old regions at 1731578101625 (+18 ms)Region opened successfully at 1731578101631 (+6 ms) 2024-11-14T09:55:01,631 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:55:01,635 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4a723ffa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:01,636 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:55:01,636 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:55:01,636 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:55:01,636 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:55:01,637 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:55:01,638 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:55:01,638 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:55:01,640 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:55:01,641 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:55:01,646 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:55:01,647 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:55:01,648 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:55:01,657 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:55:01,658 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:55:01,659 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:55:01,668 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:55:01,669 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:55:01,678 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:55:01,683 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:55:01,689 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:55:01,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:01,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:01,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,702 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,40449,1731578101265, sessionid=0x10138c4f9a00000, setting cluster-up flag (Was=false) 2024-11-14T09:55:01,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,721 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,753 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:55:01,756 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:01,815 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:55:01,817 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,40449,1731578101265 2024-11-14T09:55:01,819 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:55:01,821 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:01,821 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:55:01,822 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:55:01,822 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,40449,1731578101265 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:01,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:55:01,825 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578131825 2024-11-14T09:55:01,825 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,826 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:55:01,827 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:55:01,827 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:01,827 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:55:01,827 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:55:01,827 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:55:01,827 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:55:01,827 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578101827,5,FailOnTimeoutGroup] 2024-11-14T09:55:01,827 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578101827,5,FailOnTimeoutGroup] 2024-11-14T09:55:01,828 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,828 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:55:01,828 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,828 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,828 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,828 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:55:01,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:01,838 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:01,840 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:55:01,840 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1 2024-11-14T09:55:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:01,853 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:01,854 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:01,856 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:01,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:01,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:01,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:01,861 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:01,861 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:01,862 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:01,864 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:01,864 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:01,865 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:01,867 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:01,867 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:01,868 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:01,868 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:01,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740 2024-11-14T09:55:01,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740 2024-11-14T09:55:01,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:01,872 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:01,873 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
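The FSTableDescriptors output above shows the full hbase:meta descriptor: four column families (info, ns, rep_barrier, table), each with explicit VERSIONS, BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1' and a per-family block size. A rough sketch of assembling a descriptor with the same kind of attributes through the public client builders (the namespace, table name and single family below are illustrative only, not the internal bootstrap path used by FSTableDescriptors):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static TableDescriptor build() {
    // Attributes mirrored from the 'info' family logged above:
    // VERSIONS=3, BLOOMFILTER=ROWCOL, IN_MEMORY=true,
    // DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8192)
        .build();
    // "demo:meta_like" is a made-up table name for illustration only.
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo", "meta_like"))
        .setColumnFamily(info)
        .build();
  }
}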
2024-11-14T09:55:01,874 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:01,883 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:01,883 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(746): ClusterId : a09b9af9-f33a-431f-86b6-6bf5e4ee2861 2024-11-14T09:55:01,883 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:55:01,883 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=728632, jitterRate=-0.07349759340286255}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:01,885 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578101854Initializing all the Stores at 1731578101855 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578101855Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578101856 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578101856Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578101856Cleaning up temporary data from old regions at 1731578101872 (+16 ms)Region opened successfully at 1731578101885 (+13 ms) 2024-11-14T09:55:01,885 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:55:01,886 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:55:01,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:55:01,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:55:01,886 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:55:01,907 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed 
hbase:meta,,1.1588230740 2024-11-14T09:55:01,907 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578101885Disabling compacts and flushes for region at 1731578101885Disabling writes for close at 1731578101886 (+1 ms)Writing region close event to WAL at 1731578101907 (+21 ms)Closed at 1731578101907 2024-11-14T09:55:01,908 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:55:01,908 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:55:01,910 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:01,910 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:55:01,911 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:55:01,917 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:01,920 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:55:01,921 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:55:01,922 DEBUG [RS:0;defc576eb6b7:45731 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@388cfa43, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:01,942 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:45731 2024-11-14T09:55:01,942 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:55:01,942 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:55:01,942 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(832): About to register with Master. 
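Meta assignment above is expressed as nested procedures: InitMetaProcedure (pid=1) schedules a child TransitRegionStateProcedure (pid=2, ppid=1), which will later spawn an OpenRegionProcedure, and each parent only resumes once its children finish. A schematic plain-Java model of that pid/ppid bookkeeping (deliberately not the HBase ProcedureV2 API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.atomic.AtomicLong;

// Schematic pid/ppid bookkeeping only; this is NOT HBase's ProcedureV2 API.
final class ProcedureNestingSketch {
  private static final AtomicLong PIDS = new AtomicLong();

  static final class Proc {
    final long pid = PIDS.incrementAndGet();
    final long ppid;
    final String name;
    final List<Proc> children = new ArrayList<>();
    Proc(String name, long ppid) { this.name = name; this.ppid = ppid; }
  }

  // A parent only finishes after every child it scheduled has finished.
  static void run(Proc p) {
    for (Proc child : p.children) {
      run(child);
    }
    System.out.printf("Finished pid=%d ppid=%d %s%n", p.pid, p.ppid, p.name);
  }

  public static void main(String[] args) {
    Proc init = new Proc("InitMetaProcedure", -1);                          // pid=1
    Proc assign = new Proc("TransitRegionStateProcedure ASSIGN", init.pid); // pid=2, ppid=1
    Proc open = new Proc("OpenRegionProcedure", assign.pid);                // pid=3, ppid=2
    assign.children.add(open);
    init.children.add(assign);
    run(init); // prints pid=3, then pid=2, then pid=1, the completion order seen in the log
  }
}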
2024-11-14T09:55:01,943 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,40449,1731578101265 with port=45731, startcode=1731578101446 2024-11-14T09:55:01,943 DEBUG [RS:0;defc576eb6b7:45731 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:55:01,946 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45383, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:55:01,947 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40449 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,45731,1731578101446 2024-11-14T09:55:01,947 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40449 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,45731,1731578101446 2024-11-14T09:55:01,950 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1 2024-11-14T09:55:01,950 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:35719 2024-11-14T09:55:01,950 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:55:01,963 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:01,963 DEBUG [RS:0;defc576eb6b7:45731 {}] zookeeper.ZKUtil(111): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,45731,1731578101446 2024-11-14T09:55:01,963 WARN [RS:0;defc576eb6b7:45731 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:55:01,963 INFO [RS:0;defc576eb6b7:45731 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:01,964 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,45731,1731578101446] 2024-11-14T09:55:01,964 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/WALs/defc576eb6b7,45731,1731578101446 2024-11-14T09:55:01,971 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:55:01,975 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:55:01,975 INFO [RS:0;defc576eb6b7:45731 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:55:01,975 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
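Both processes above are reacting to ZooKeeper watches: the region server registers its ephemeral znode under /hbase/rs, ZKUtil re-sets a watcher on it, and the master's RegionServerTracker picks the server up from the resulting NodeChildrenChanged event. A bare-bones sketch of that watch-and-list pattern with the plain ZooKeeper client (connect string, timeout and output are placeholders):

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsWatchSketch {
  public static void main(String[] args) throws Exception {
    // Placeholder quorum; the test above runs ZooKeeper on 127.0.0.1:65279.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, new Watcher() {
      @Override
      public void process(WatchedEvent e) {
        System.out.println("event type=" + e.getType() + " path=" + e.getPath());
      }
    });
    // Watch the list of live region servers; the watch fires once on the next
    // NodeChildrenChanged under /hbase/rs and then has to be re-registered.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live regionservers: " + servers);
    zk.close();
  }
}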
2024-11-14T09:55:01,976 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:55:01,977 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:55:01,977 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,977 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,977 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,978 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:01,979 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:01,979 DEBUG [RS:0;defc576eb6b7:45731 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
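Each RS_* executor started above (like the MASTER_* ones earlier) is a small, named, fixed-size pool: corePoolSize equals maxPoolSize, for example 3/3 for RS_FLUSH_OPERATIONS. The same shape in plain java.util.concurrent, assuming nothing about HBase's own ExecutorService wrapper:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class NamedPoolSketch {
  // Fixed-size pool with named threads, e.g. corePoolSize=3, maxPoolSize=3
  // like RS_FLUSH_OPERATIONS in the log above.
  static ThreadPoolExecutor newPool(String name, int size) {
    AtomicInteger counter = new AtomicInteger();
    return new ThreadPoolExecutor(size, size, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + counter.incrementAndGet()));
  }

  public static void main(String[] args) throws Exception {
    ThreadPoolExecutor flushOps = newPool("RS_FLUSH_OPERATIONS", 3);
    flushOps.submit(() -> System.out.println(Thread.currentThread().getName()));
    flushOps.shutdown();
    flushOps.awaitTermination(5, TimeUnit.SECONDS);
  }
}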
2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,979 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,45731,1731578101446-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:01,997 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:55:01,997 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,45731,1731578101446-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,997 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:01,997 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.Replication(171): defc576eb6b7,45731,1731578101446 started 2024-11-14T09:55:02,013 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,013 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,45731,1731578101446, RpcServer on defc576eb6b7/172.17.0.2:45731, sessionid=0x10138c4f9a00001 2024-11-14T09:55:02,013 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:55:02,013 DEBUG [RS:0;defc576eb6b7:45731 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,014 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,45731,1731578101446' 2024-11-14T09:55:02,014 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:55:02,014 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:55:02,015 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:55:02,015 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:55:02,015 DEBUG [RS:0;defc576eb6b7:45731 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,015 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,45731,1731578101446' 2024-11-14T09:55:02,015 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:55:02,015 DEBUG 
[RS:0;defc576eb6b7:45731 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:55:02,016 DEBUG [RS:0;defc576eb6b7:45731 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:55:02,016 INFO [RS:0;defc576eb6b7:45731 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:55:02,016 INFO [RS:0;defc576eb6b7:45731 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:55:02,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:55:02,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:55:02,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-14T09:55:02,070 WARN [defc576eb6b7:40449 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:55:02,119 INFO [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C45731%2C1731578101446, suffix=, logDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/WALs/defc576eb6b7,45731,1731578101446, archiveDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/oldWALs, maxLogs=32 2024-11-14T09:55:02,122 INFO [RS:0;defc576eb6b7:45731 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C45731%2C1731578101446.1731578102121 2024-11-14T09:55:02,129 INFO [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/WALs/defc576eb6b7,45731,1731578101446/defc576eb6b7%2C45731%2C1731578101446.1731578102121 2024-11-14T09:55:02,133 DEBUG [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46409:46409),(127.0.0.1/127.0.0.1:36969:36969)] 2024-11-14T09:55:02,320 DEBUG [defc576eb6b7:40449 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:55:02,321 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,323 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,45731,1731578101446, state=OPENING 2024-11-14T09:55:02,373 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:55:02,378 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:02,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:02,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:02,384 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:02,384 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:02,384 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:02,384 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:02,384 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,45731,1731578101446}] 2024-11-14T09:55:02,539 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:55:02,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42683, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:55:02,546 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:55:02,547 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:02,549 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C45731%2C1731578101446.meta, suffix=.meta, logDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/WALs/defc576eb6b7,45731,1731578101446, archiveDir=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/oldWALs, maxLogs=32 2024-11-14T09:55:02,552 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C45731%2C1731578101446.meta.1731578102552.meta 2024-11-14T09:55:02,561 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/WALs/defc576eb6b7,45731,1731578101446/defc576eb6b7%2C45731%2C1731578101446.meta.1731578102552.meta 2024-11-14T09:55:02,562 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36969:36969),(127.0.0.1/127.0.0.1:46409:46409)] 2024-11-14T09:55:02,563 DEBUG 
[RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:02,563 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:55:02,563 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:55:02,563 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:55:02,563 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:55:02,563 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:02,564 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:55:02,564 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:55:02,565 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:02,566 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:02,566 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:02,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:02,567 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:02,568 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:02,568 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:02,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:02,569 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:02,570 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:02,570 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:02,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:02,571 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:02,572 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:02,572 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:02,572 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:02,573 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:02,574 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740 2024-11-14T09:55:02,575 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740 2024-11-14T09:55:02,577 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:02,577 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:02,578 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
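The CompactionConfiguration line repeated for every store spells out the effective compaction knobs: minFilesToCompact=3, maxFilesToCompact=10, ratio 1.2, an effectively unbounded maxCompactSize (8.00 EB), and so on. The log prints the derived values rather than the property names; the keys below are the commonly documented ones and are listed here as an assumption:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Mirror the values printed by CompactionConfiguration above.
    conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f); // selection ratio
    conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE); // maxCompactSize, effectively unbounded
    return conf;
  }
}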
2024-11-14T09:55:02,580 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:02,581 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=706238, jitterRate=-0.10197240114212036}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:02,581 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:55:02,582 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578102564Writing region info on filesystem at 1731578102564Initializing all the Stores at 1731578102565 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578102565Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578102565Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578102565Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578102565Cleaning up temporary data from old regions at 1731578102577 (+12 ms)Running coprocessor post-open hooks at 1731578102581 (+4 ms)Region opened successfully at 1731578102582 (+1 ms) 2024-11-14T09:55:02,584 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578102538 2024-11-14T09:55:02,587 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:55:02,587 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:55:02,588 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,589 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,45731,1731578101446, state=OPEN 2024-11-14T09:55:02,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:02,625 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:02,626 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:02,626 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:02,626 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,631 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:55:02,632 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,45731,1731578101446 in 242 msec 2024-11-14T09:55:02,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:55:02,635 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 720 msec 2024-11-14T09:55:02,636 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:02,636 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:55:02,638 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:02,638 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,45731,1731578101446, seqNum=-1] 2024-11-14T09:55:02,638 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:02,640 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40757, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:02,648 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 826 msec 2024-11-14T09:55:02,648 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578102648, completionTime=-1 2024-11-14T09:55:02,648 INFO 
[master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:55:02,648 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:55:02,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:55:02,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578162651 2024-11-14T09:55:02,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578222651 2024-11-14T09:55:02,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:40449, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,655 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.139sec 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
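Every ScheduledChore registered above (BalancerChore and RegionNormalizerChore every 300000 ms, CatalogJanitor every 300000 ms, HbckChore every 3600000 ms, and so on) is a named task run at a fixed period by the ChoreService. A plain-JDK equivalent of that scheduling pattern, not the ChoreService API itself:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor(
        r -> new Thread(r, "BalancerChore-sketch"));
    // Run a balancer-like task every 300000 ms, matching the BalancerChore period above.
    chores.scheduleAtFixedRate(
        () -> System.out.println("balance pass"),
        0, 300_000, TimeUnit.MILLISECONDS);
    // chores.shutdown() would stop the chore, mirroring ChoreService cancellation.
  }
}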
2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:02,660 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:55:02,665 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:55:02,665 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:55:02,665 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,40449,1731578101265-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:02,683 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@439a6e0b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:02,683 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,40449,-1 for getting cluster id 2024-11-14T09:55:02,683 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:55:02,685 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'a09b9af9-f33a-431f-86b6-6bf5e4ee2861' 2024-11-14T09:55:02,686 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:55:02,686 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "a09b9af9-f33a-431f-86b6-6bf5e4ee2861" 2024-11-14T09:55:02,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@300aa63e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:02,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,40449,-1] 2024-11-14T09:55:02,687 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:55:02,688 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,690 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50556, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:55:02,692 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@29476dfa, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:02,692 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:02,694 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,45731,1731578101446, seqNum=-1] 2024-11-14T09:55:02,695 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:02,697 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53098, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:02,699 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,40449,1731578101265 2024-11-14T09:55:02,700 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:02,705 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:55:02,705 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:55:02,705 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:55:02,705 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at 
org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:02,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,706 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,706 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:55:02,706 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1035653102, stopped=false 2024-11-14T09:55:02,706 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,40449,1731578101265 2024-11-14T09:55:02,706 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:55:02,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:02,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:02,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:02,728 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:02,728 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:55:02,728 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
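The call stack logged above shows the shutdown being driven from the test's tearDown hook (AbstractTestLogRolling.tearDown), which ends in HBaseTestingUtil.shutdownMiniCluster. A minimal sketch of that JUnit 4 lifecycle pattern follows; the startMiniCluster()/shutdownMiniCluster() entry points and the no-arg HBaseTestingUtil constructor are assumptions inferred from the stack trace and the "Starting up minicluster" log lines, not copied from the source:

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
    // One utility instance drives the whole mini cluster (ZK, HDFS, master, region server).
    private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

    @Before
    public void setUp() throws Exception {
        testUtil.startMiniCluster();     // assumed entry point; the log reports "Starting up minicluster with option: ..."
    }

    @After
    public void tearDown() throws Exception {
        testUtil.shutdownMiniCluster();  // the method visible in the call stack above
    }

    @Test
    public void clusterComesUpAndDown() {
        // Real tests (e.g. testLogRollOnDatanodeDeath) exercise WAL rolling between setUp and tearDown.
    }
}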
2024-11-14T09:55:02,729 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:02,729 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,729 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,45731,1731578101446' ***** 2024-11-14T09:55:02,729 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:55:02,729 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:55:02,729 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:55:02,729 INFO [RS:0;defc576eb6b7:45731 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:55:02,729 INFO [RS:0;defc576eb6b7:45731 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:45731. 2024-11-14T09:55:02,730 DEBUG [RS:0;defc576eb6b7:45731 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:02,730 DEBUG [RS:0;defc576eb6b7:45731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:55:02,730 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:55:02,730 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:02,731 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:02,731 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T09:55:02,731 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:55:02,731 DEBUG [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T09:55:02,731 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:55:02,732 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:55:02,732 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:55:02,732 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:55:02,732 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:55:02,732 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T09:55:02,754 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/.tmp/ns/30655a1a1a59468e863adcf4827783f4 is 43, key is default/ns:d/1731578102640/Put/seqid=0 2024-11-14T09:55:02,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741835_1011 (size=5153) 2024-11-14T09:55:02,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741835_1011 (size=5153) 2024-11-14T09:55:02,765 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/.tmp/ns/30655a1a1a59468e863adcf4827783f4 2024-11-14T09:55:02,781 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/.tmp/ns/30655a1a1a59468e863adcf4827783f4 as 
hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/ns/30655a1a1a59468e863adcf4827783f4 2024-11-14T09:55:02,795 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/ns/30655a1a1a59468e863adcf4827783f4, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T09:55:02,797 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 65ms, sequenceid=6, compaction requested=false 2024-11-14T09:55:02,797 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:55:02,804 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T09:55:02,805 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:55:02,805 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:02,805 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578102731Running coprocessor pre-close hooks at 1731578102731Disabling compacts and flushes for region at 1731578102731Disabling writes for close at 1731578102732 (+1 ms)Obtaining lock to block concurrent updates at 1731578102732Preparing flush snapshotting stores in 1588230740 at 1731578102732Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731578102733 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731578102736 (+3 ms)Flushing 1588230740/ns: creating writer at 1731578102736Flushing 1588230740/ns: appending metadata at 1731578102753 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1731578102753Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1177fd47: reopening flushed file at 1731578102778 (+25 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 65ms, sequenceid=6, compaction requested=false at 1731578102797 (+19 ms)Writing region close event to WAL at 1731578102798 (+1 ms)Running coprocessor post-close hooks at 1731578102804 (+6 ms)Closed at 1731578102805 (+1 ms) 2024-11-14T09:55:02,805 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:02,899 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:55:02,902 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:02,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:02,932 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,45731,1731578101446; all regions closed. 2024-11-14T09:55:02,936 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,936 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,937 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,937 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,937 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,942 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:55:02,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:55:02,953 DEBUG [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/oldWALs 2024-11-14T09:55:02,953 INFO [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C45731%2C1731578101446.meta:.meta(num 1731578102552) 2024-11-14T09:55:02,953 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,954 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,954 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,954 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,954 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:55:02,956 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:55:02,959 DEBUG [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/oldWALs 2024-11-14T09:55:02,959 INFO [RS:0;defc576eb6b7:45731 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C45731%2C1731578101446:(num 1731578102121) 2024-11-14T09:55:02,959 DEBUG [RS:0;defc576eb6b7:45731 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:02,959 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:02,959 INFO [RS:0;defc576eb6b7:45731 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:55:02,960 INFO [RS:0;defc576eb6b7:45731 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:55:02,960 INFO [RS:0;defc576eb6b7:45731 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:55:02,960 INFO [regionserver/defc576eb6b7:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:55:02,960 INFO [RS:0;defc576eb6b7:45731 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45731 2024-11-14T09:55:02,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:02,973 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,45731,1731578101446 2024-11-14T09:55:02,973 INFO [RS:0;defc576eb6b7:45731 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:55:02,974 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,45731,1731578101446] 2024-11-14T09:55:02,994 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,45731,1731578101446 already deleted, retry=false 2024-11-14T09:55:02,994 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,45731,1731578101446 expired; onlineServers=0 2024-11-14T09:55:02,994 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,40449,1731578101265' ***** 2024-11-14T09:55:02,994 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:55:02,994 INFO [M:0;defc576eb6b7:40449 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:55:02,995 INFO [M:0;defc576eb6b7:40449 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:55:02,995 DEBUG [M:0;defc576eb6b7:40449 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:55:02,995 DEBUG [M:0;defc576eb6b7:40449 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:55:02,995 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
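The NodeDeleted/NodeChildrenChanged events above are ordinary ZooKeeper watch notifications fired when the region server's ephemeral znode under /hbase/rs disappears. A minimal, hypothetical watcher sketch using the plain ZooKeeper client API is shown below (the connection string and znode path are borrowed from the log for illustration; this is not HBase's ZKWatcher implementation):

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RsZnodeWatchSketch {
    public static void main(String[] args) throws Exception {
        CountDownLatch deleted = new CountDownLatch(1);
        Watcher watcher = (WatchedEvent event) -> {
            // Mirrors the log lines: "Received ZooKeeper Event, type=NodeDeleted, ... path=/hbase/rs/..."
            System.out.println("type=" + event.getType() + ", path=" + event.getPath());
            if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
                deleted.countDown();
            }
        };
        // Session-level events (type=None, state changes) also arrive on this watcher, as in the log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:65279", 30_000, watcher);
        // exists() registers a one-shot watch, so deleting the ephemeral RS znode fires the callback.
        zk.exists("/hbase/rs/defc576eb6b7,45731,1731578101446", watcher);
        deleted.await();
        zk.close();
    }
}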
2024-11-14T09:55:02,995 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578101827 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578101827,5,FailOnTimeoutGroup] 2024-11-14T09:55:02,995 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578101827 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578101827,5,FailOnTimeoutGroup] 2024-11-14T09:55:02,995 INFO [M:0;defc576eb6b7:40449 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:55:02,995 INFO [M:0;defc576eb6b7:40449 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:55:02,995 DEBUG [M:0;defc576eb6b7:40449 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:55:02,995 INFO [M:0;defc576eb6b7:40449 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:55:02,996 INFO [M:0;defc576eb6b7:40449 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:55:02,996 INFO [M:0;defc576eb6b7:40449 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:55:02,996 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:55:03,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:55:03,005 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:03,005 DEBUG [M:0;defc576eb6b7:40449 {}] zookeeper.ZKUtil(347): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:55:03,005 WARN [M:0;defc576eb6b7:40449 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:55:03,006 INFO [M:0;defc576eb6b7:40449 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/.lastflushedseqids 2024-11-14T09:55:03,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741836_1012 (size=99) 2024-11-14T09:55:03,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741836_1012 (size=99) 2024-11-14T09:55:03,018 INFO [M:0;defc576eb6b7:40449 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:55:03,018 INFO [M:0;defc576eb6b7:40449 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:55:03,019 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:55:03,019 INFO [M:0;defc576eb6b7:40449 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:03,019 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:03,019 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:55:03,019 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:03,019 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T09:55:03,042 DEBUG [M:0;defc576eb6b7:40449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/583b070d7a174e908085d053ef2be006 is 82, key is hbase:meta,,1/info:regioninfo/1731578102588/Put/seqid=0 2024-11-14T09:55:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741837_1013 (size=5672) 2024-11-14T09:55:03,050 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741837_1013 (size=5672) 2024-11-14T09:55:03,053 INFO [M:0;defc576eb6b7:40449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/583b070d7a174e908085d053ef2be006 2024-11-14T09:55:03,079 DEBUG [M:0;defc576eb6b7:40449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b36dbd9e6ea44baaa7cb6b04d1a4e54 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731578102647/Put/seqid=0 2024-11-14T09:55:03,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:03,084 INFO [RS:0;defc576eb6b7:45731 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:55:03,084 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:45731-0x10138c4f9a00001, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:03,084 INFO [RS:0;defc576eb6b7:45731 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,45731,1731578101446; zookeeper connection closed. 
2024-11-14T09:55:03,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741838_1014 (size=5275) 2024-11-14T09:55:03,084 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@379ec9eb {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@379ec9eb 2024-11-14T09:55:03,084 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741838_1014 (size=5275) 2024-11-14T09:55:03,085 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:55:03,085 INFO [M:0;defc576eb6b7:40449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b36dbd9e6ea44baaa7cb6b04d1a4e54 2024-11-14T09:55:03,110 DEBUG [M:0;defc576eb6b7:40449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf9da98812c947e083f581b4b944847d is 69, key is defc576eb6b7,45731,1731578101446/rs:state/1731578101947/Put/seqid=0 2024-11-14T09:55:03,115 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741839_1015 (size=5156) 2024-11-14T09:55:03,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741839_1015 (size=5156) 2024-11-14T09:55:03,117 INFO [M:0;defc576eb6b7:40449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf9da98812c947e083f581b4b944847d 2024-11-14T09:55:03,145 DEBUG [M:0;defc576eb6b7:40449 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19e54c32326c47a6a9d204a174906fdd is 52, key is load_balancer_on/state:d/1731578102703/Put/seqid=0 2024-11-14T09:55:03,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741840_1016 (size=5056) 2024-11-14T09:55:03,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741840_1016 (size=5056) 2024-11-14T09:55:03,150 INFO [M:0;defc576eb6b7:40449 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19e54c32326c47a6a9d204a174906fdd 2024-11-14T09:55:03,160 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/583b070d7a174e908085d053ef2be006 as 
hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/583b070d7a174e908085d053ef2be006 2024-11-14T09:55:03,171 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/583b070d7a174e908085d053ef2be006, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T09:55:03,173 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/9b36dbd9e6ea44baaa7cb6b04d1a4e54 as hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b36dbd9e6ea44baaa7cb6b04d1a4e54 2024-11-14T09:55:03,180 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/9b36dbd9e6ea44baaa7cb6b04d1a4e54, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T09:55:03,182 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/cf9da98812c947e083f581b4b944847d as hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf9da98812c947e083f581b4b944847d 2024-11-14T09:55:03,190 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/cf9da98812c947e083f581b4b944847d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T09:55:03,192 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/19e54c32326c47a6a9d204a174906fdd as hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19e54c32326c47a6a9d204a174906fdd 2024-11-14T09:55:03,199 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:35719/user/jenkins/test-data/63609546-998e-13d5-d6b7-5331cef704c1/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/19e54c32326c47a6a9d204a174906fdd, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T09:55:03,201 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 182ms, sequenceid=29, compaction requested=false 2024-11-14T09:55:03,203 INFO [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
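The repeated "Committing .../.tmp/<family>/<file> as .../<family>/<file>" lines above show the flush commit pattern: each column family's memstore is first written to a file under the region's .tmp directory and only then moved into the store directory, so readers never observe a partially written file. The sketch below illustrates that write-then-rename idea with the Hadoop FileSystem API; the paths are placeholders and this is not HBase's HRegionFileSystem code:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommitSketch {
    public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);   // local FS unless fs.defaultFS points at HDFS
        Path tmp = new Path("/tmp/region-demo/.tmp/info/flushfile");
        Path committed = new Path("/tmp/region-demo/info/flushfile");

        // 1. Write the flushed data under .tmp so a crash mid-write leaves no visible store file.
        try (FSDataOutputStream out = fs.create(tmp, true)) {
            out.write("flushed cells would go here".getBytes(StandardCharsets.UTF_8));
        }

        // 2. Commit by renaming into the store (family) directory, as in the HRegionFileSystem(442) log lines.
        fs.mkdirs(committed.getParent());
        if (!fs.rename(tmp, committed)) {
            throw new IOException("Failed to commit " + tmp + " as " + committed);
        }
    }
}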
2024-11-14T09:55:03,203 DEBUG [M:0;defc576eb6b7:40449 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578103019Disabling compacts and flushes for region at 1731578103019Disabling writes for close at 1731578103019Obtaining lock to block concurrent updates at 1731578103019Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578103019Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731578103020 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578103021 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578103021Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578103041 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578103041Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578103059 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578103079 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578103079Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578103091 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578103109 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578103109Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578103124 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578103144 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578103144Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@e15442c: reopening flushed file at 1731578103158 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@51d8128a: reopening flushed file at 1731578103172 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2a50ad1b: reopening flushed file at 1731578103180 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71f1296e: reopening flushed file at 1731578103190 (+10 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 182ms, sequenceid=29, compaction requested=false at 1731578103201 (+11 ms)Writing region close event to WAL at 1731578103202 (+1 ms)Closed at 1731578103202 2024-11-14T09:55:03,203 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:03,203 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:03,203 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:03,203 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:03,203 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:03,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46247 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:55:03,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35423 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:55:03,209 INFO [M:0;defc576eb6b7:40449 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T09:55:03,209 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:55:03,210 INFO [M:0;defc576eb6b7:40449 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40449 2024-11-14T09:55:03,210 INFO [M:0;defc576eb6b7:40449 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:55:03,323 INFO [M:0;defc576eb6b7:40449 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:55:03,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:03,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40449-0x10138c4f9a00000, quorum=127.0.0.1:65279, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:03,326 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11ff445e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:03,327 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:03,327 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:03,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:03,327 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:03,328 WARN [BP-454442519-172.17.0.2-1731578098036 heartbeating to localhost/127.0.0.1:35719 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:03,328 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
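The "Stopped ServerConnector...{HTTP/1.1}" and "Stopped o.e.j.w.WebAppContext{datanode,...}" lines above are standard embedded Jetty lifecycle logging from the HDFS daemons' web UIs being torn down. A tiny, hypothetical embedded-Jetty sketch of that start/stop lifecycle (not the Hadoop HttpServer2 wiring) looks like:

import org.eclipse.jetty.server.Server;

public class EmbeddedJettySketch {
    public static void main(String[] args) throws Exception {
        Server server = new Server(0);   // port 0 = ephemeral, like the "{localhost:0}" connectors in the log
        server.start();
        System.out.println("started: " + server.getURI());
        server.stop();                   // emits "Stopped ServerConnector..." style lifecycle messages
        server.join();
    }
}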
2024-11-14T09:55:03,328 WARN [BP-454442519-172.17.0.2-1731578098036 heartbeating to localhost/127.0.0.1:35719 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454442519-172.17.0.2-1731578098036 (Datanode Uuid 15df0cd2-2780-4303-96c8-a20d7133dd02) service to localhost/127.0.0.1:35719 2024-11-14T09:55:03,328 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:03,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data3/current/BP-454442519-172.17.0.2-1731578098036 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:03,329 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data4/current/BP-454442519-172.17.0.2-1731578098036 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:03,329 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:03,333 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e335929{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:03,333 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:03,333 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:03,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:03,334 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:03,335 WARN [BP-454442519-172.17.0.2-1731578098036 heartbeating to localhost/127.0.0.1:35719 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:03,335 WARN [BP-454442519-172.17.0.2-1731578098036 heartbeating to localhost/127.0.0.1:35719 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-454442519-172.17.0.2-1731578098036 (Datanode Uuid 7e2bf6c9-c644-4a8a-a112-87eb21c01ad4) service to localhost/127.0.0.1:35719 2024-11-14T09:55:03,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data1/current/BP-454442519-172.17.0.2-1731578098036 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:03,336 ERROR [Command 
processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:03,336 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:03,336 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/cluster_1d0bf3a5-84b9-31ef-d8a7-f29615def2c6/data/data2/current/BP-454442519-172.17.0.2-1731578098036 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:03,336 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:03,341 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6db7bfac{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:55:03,342 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f2bc681{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:03,342 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:03,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@124e4130{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:03,342 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ea6e47a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:03,350 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:55:03,371 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.log.dir so I do NOT create it in target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c97a262e-5406-ce1b-5311-18181e68405d/hadoop.tmp.dir so I do NOT create it in target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07, deleteOnExit=true 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/test.cache.data in system properties and HBase conf 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:55:03,372 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:55:03,373 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:55:03,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:55:03,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:55:03,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:55:03,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:55:03,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:55:03,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:55:03,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:55:03,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:03,764 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:03,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:03,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:03,773 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:55:03,774 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:03,775 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@237b8284{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:03,776 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@77dbc458{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:03,893 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5ad1779c{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-33543-hadoop-hdfs-3_4_1-tests_jar-_-any-7002069628941167354/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:55:03,894 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@d2dba01{HTTP/1.1, (http/1.1)}{localhost:33543} 2024-11-14T09:55:03,894 INFO [Time-limited test {}] server.Server(415): Started @108529ms 2024-11-14T09:55:03,914 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:55:03,980 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:04,171 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:04,176 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:04,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:04,176 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:04,177 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:55:04,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3acea222{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:04,177 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3dbb786f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:04,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1c728f32{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-46009-hadoop-hdfs-3_4_1-tests_jar-_-any-12827458580449778091/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:04,280 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ee92458{HTTP/1.1, (http/1.1)}{localhost:46009} 2024-11-14T09:55:04,280 INFO [Time-limited test {}] server.Server(415): Started @108915ms 2024-11-14T09:55:04,282 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:04,325 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:04,329 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:04,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:04,332 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:04,332 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:55:04,332 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@536f5169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:04,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ff5e5af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:04,455 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7246d80e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-35617-hadoop-hdfs-3_4_1-tests_jar-_-any-9651923024164645788/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:04,456 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5be1ef63{HTTP/1.1, (http/1.1)}{localhost:35617} 2024-11-14T09:55:04,456 INFO [Time-limited test {}] server.Server(415): Started @109091ms 2024-11-14T09:55:04,458 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:05,321 WARN [Thread-673 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data1/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:05,321 WARN [Thread-674 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data2/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:05,346 WARN [Thread-637 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:05,349 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b252d4bf990702 with lease ID 0xa0ce174adeee56e9: Processing first storage report for DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2 from datanode DatanodeRegistration(127.0.0.1:45521, datanodeUuid=f8c92a6c-2ed8-4adf-bf4a-cfedcd11fcc3, infoPort=40761, infoSecurePort=0, ipcPort=40099, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b252d4bf990702 with lease ID 0xa0ce174adeee56e9: from storage DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2 node DatanodeRegistration(127.0.0.1:45521, datanodeUuid=f8c92a6c-2ed8-4adf-bf4a-cfedcd11fcc3, infoPort=40761, infoSecurePort=0, ipcPort=40099, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4b252d4bf990702 with lease ID 0xa0ce174adeee56e9: Processing first storage report for DS-9ae0ad60-d0b7-4807-a18b-0e04a8e51347 from datanode DatanodeRegistration(127.0.0.1:45521, datanodeUuid=f8c92a6c-2ed8-4adf-bf4a-cfedcd11fcc3, infoPort=40761, infoSecurePort=0, ipcPort=40099, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:05,350 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4b252d4bf990702 with lease ID 0xa0ce174adeee56e9: from storage DS-9ae0ad60-d0b7-4807-a18b-0e04a8e51347 node DatanodeRegistration(127.0.0.1:45521, datanodeUuid=f8c92a6c-2ed8-4adf-bf4a-cfedcd11fcc3, infoPort=40761, infoSecurePort=0, ipcPort=40099, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:05,459 WARN [Thread-684 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:05,460 WARN [Thread-685 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:05,491 WARN [Thread-660 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:05,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc275f688ac9ff94d with lease ID 0xa0ce174adeee56ea: Processing first storage report for DS-44d46d50-330e-43d6-8007-981da901f360 from datanode DatanodeRegistration(127.0.0.1:37119, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=41273, infoSecurePort=0, ipcPort=36251, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:05,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc275f688ac9ff94d with lease ID 0xa0ce174adeee56ea: from storage DS-44d46d50-330e-43d6-8007-981da901f360 node DatanodeRegistration(127.0.0.1:37119, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=41273, infoSecurePort=0, ipcPort=36251, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:05,494 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc275f688ac9ff94d with lease ID 0xa0ce174adeee56ea: Processing first storage report for DS-5d9650ef-a569-489f-ad99-20f4a3bde754 from datanode DatanodeRegistration(127.0.0.1:37119, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=41273, infoSecurePort=0, ipcPort=36251, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:05,494 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc275f688ac9ff94d with lease ID 0xa0ce174adeee56ea: from storage DS-5d9650ef-a569-489f-ad99-20f4a3bde754 node DatanodeRegistration(127.0.0.1:37119, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=41273, infoSecurePort=0, ipcPort=36251, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:05,506 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394 2024-11-14T09:55:05,509 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/zookeeper_0, clientPort=59567, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:55:05,512 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59567 2024-11-14T09:55:05,512 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,514 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:05,526 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:05,527 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527 with version=8 2024-11-14T09:55:05,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:55:05,529 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:55:05,529 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:05,530 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36445 2024-11-14T09:55:05,532 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:36445 connecting to ZooKeeper ensemble=127.0.0.1:59567 2024-11-14T09:55:05,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:364450x0, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:05,597 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:36445-0x10138c50a4f0000 connected 2024-11-14T09:55:05,700 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,702 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,706 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:05,706 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527, hbase.cluster.distributed=false 2024-11-14T09:55:05,710 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:05,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36445 2024-11-14T09:55:05,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36445 2024-11-14T09:55:05,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36445 2024-11-14T09:55:05,728 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36445 2024-11-14T09:55:05,732 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36445 2024-11-14T09:55:05,754 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:05,754 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,755 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:05,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:05,755 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:05,755 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:55:05,757 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:05,758 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:37839 2024-11-14T09:55:05,760 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:37839 connecting to ZooKeeper ensemble=127.0.0.1:59567 2024-11-14T09:55:05,761 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,765 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:378390x0, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:05,784 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:37839-0x10138c50a4f0001 connected 2024-11-14T09:55:05,785 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:05,786 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:55:05,796 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:55:05,797 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:55:05,798 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:05,800 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-14T09:55:05,803 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=37839 2024-11-14T09:55:05,804 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=37839 2024-11-14T09:55:05,807 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-14T09:55:05,809 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=37839 2024-11-14T09:55:05,830 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:36445 2024-11-14T09:55:05,830 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,36445,1731578105529 2024-11-14T09:55:05,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:05,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:05,837 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,36445,1731578105529 2024-11-14T09:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:05,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:55:05,847 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:05,847 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:55:05,847 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,36445,1731578105529 from backup master directory 2024-11-14T09:55:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,36445,1731578105529 2024-11-14T09:55:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:05,857 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:05,857 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
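[editor's note] The repeated "Set watcher on znode that does not yet exist" entries above reflect the standard ZooKeeper pattern of registering a watch through an exists() call so the client is notified when the node is created later. Below is a minimal sketch of that pattern using the plain Apache ZooKeeper client; the ensemble address is a placeholder (the minicluster above picks a random client port), and HBase's own ZKWatcher/ZKUtil wrap the same primitive rather than this exact code.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Placeholder ensemble address; not the port from this log.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
    });
    connected.await();

    // exists() returns null while the znode is absent, but it still registers the
    // watcher, which fires a NodeCreated event once the master creates the node.
    Stat stat = zk.exists("/hbase/running", event -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        System.out.println("znode created: " + event.getPath());
      }
    });
    System.out.println("znode present yet? " + (stat != null));
    // A real watcher keeps the session open until the event arrives; this sketch just exits.
    zk.close();
  }
}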
2024-11-14T09:55:05,857 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,36445,1731578105529 2024-11-14T09:55:05,866 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/hbase.id] with ID: dc039e93-fb69-40e5-9d1d-7f8f7545f872 2024-11-14T09:55:05,866 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/.tmp/hbase.id 2024-11-14T09:55:05,874 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:05,875 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:05,875 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/.tmp/hbase.id]:[hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/hbase.id] 2024-11-14T09:55:05,895 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:05,895 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:55:05,897 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
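[editor's note] The cluster ID entries just above ("Write the cluster ID file to a temporary location" followed by "Move the temporary cluster ID file to its target location") show the usual HDFS idiom of writing to a temp path and then renaming it into place so readers never see a half-written file. A minimal sketch of that idiom with the stock Hadoop FileSystem API follows; the namenode URI and paths are illustrative placeholders, not the actual FSUtils implementation, and only the cluster ID string is taken from the log.

import java.net.URI;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WriteThenRenameSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode URI; the minicluster above binds a random port.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);

    Path tmpPath = new Path("/hbase/.tmp/hbase.id");
    Path finalPath = new Path("/hbase/hbase.id");

    // Write the content to the temporary location first.
    try (FSDataOutputStream out = fs.create(tmpPath, true)) {
      out.write("dc039e93-fb69-40e5-9d1d-7f8f7545f872".getBytes(StandardCharsets.UTF_8));
    }
    // Renaming publishes the file under its final name in one step.
    if (!fs.rename(tmpPath, finalPath)) {
      throw new java.io.IOException("rename failed: " + tmpPath + " -> " + finalPath);
    }
  }
}

The point of the two-step write is that a reader polling for hbase.id either sees no file or the complete file, never a partially flushed one.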
2024-11-14T09:55:05,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:05,910 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:05,919 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:55:05,920 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:55:05,920 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:05,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:05,936 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:05,938 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store 2024-11-14T09:55:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:05,946 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:06,347 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:06,347 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:55:06,348 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:06,348 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:06,348 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:55:06,348 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:06,348 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
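[editor's note] The "Create or load local region for table 'master:store'" entry above prints the full table descriptor, including the 'info' family settings (VERSIONS=3, IN_MEMORY, ROWCOL bloom filter, ROW_INDEX_V1 encoding, 8 KB blocks). As a rough illustration only, and not how MasterRegion assembles it internally, a descriptor with those 'info' settings could be built with the public HBase client builders roughly like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  // Mirrors the 'info' family printed in the log; the other families (proc, rs, state)
  // would be added the same way with their logged settings.
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}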
2024-11-14T09:55:06,348 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578106347Disabling compacts and flushes for region at 1731578106347Disabling writes for close at 1731578106348 (+1 ms)Writing region close event to WAL at 1731578106348Closed at 1731578106348 2024-11-14T09:55:06,351 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/.initializing 2024-11-14T09:55:06,351 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529 2024-11-14T09:55:06,355 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C36445%2C1731578105529, suffix=, logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529, archiveDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/oldWALs, maxLogs=10 2024-11-14T09:55:06,356 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C36445%2C1731578105529.1731578106356 2024-11-14T09:55:06,363 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 2024-11-14T09:55:06,365 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-14T09:55:06,374 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:06,375 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:06,375 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,375 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,379 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,381 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:55:06,381 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:06,382 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,383 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:55:06,383 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:06,384 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,386 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:55:06,386 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:06,387 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,389 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:55:06,389 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,390 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:06,390 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,391 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,392 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,394 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,394 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,395 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:55:06,396 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:06,399 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:06,400 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=868187, jitterRate=0.10395771265029907}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:55:06,401 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578106375Initializing all the Stores at 1731578106376 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578106376Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578106379 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578106379Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578106379Cleaning up temporary data from old regions at 1731578106394 (+15 ms)Region opened successfully at 1731578106401 (+7 ms) 2024-11-14T09:55:06,401 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:55:06,405 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5756db4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:06,406 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:55:06,407 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:55:06,407 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:55:06,407 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:55:06,408 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:55:06,408 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:55:06,408 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:55:06,411 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:55:06,412 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:55:06,457 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:55:06,457 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:55:06,458 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:55:06,467 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:55:06,468 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:55:06,470 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:55:06,478 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:55:06,479 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:55:06,488 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:55:06,491 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:55:06,499 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:55:06,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:06,509 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:06,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,510 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,510 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,36445,1731578105529, sessionid=0x10138c50a4f0000, setting cluster-up flag (Was=false) 2024-11-14T09:55:06,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,562 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:55:06,565 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,36445,1731578105529 2024-11-14T09:55:06,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:06,625 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:55:06,627 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,36445,1731578105529 2024-11-14T09:55:06,629 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:55:06,633 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:06,634 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:55:06,634 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:55:06,634 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,36445,1731578105529 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:55:06,636 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:06,636 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:06,636 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:06,636 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:06,636 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:55:06,637 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,637 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:06,637 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:55:06,647 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:06,647 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:55:06,648 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578136648 2024-11-14T09:55:06,648 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:55:06,649 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore 
ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:55:06,649 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:55:06,650 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:55:06,650 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:55:06,650 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:55:06,650 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578106650,5,FailOnTimeoutGroup] 2024-11-14T09:55:06,651 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578106651,5,FailOnTimeoutGroup] 2024-11-14T09:55:06,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:55:06,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
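For context on the FSTableDescriptors entry above: the hbase:meta descriptor it writes (families info/ns/rep_barrier/table, ROW_INDEX_V1 block encoding, ROWCOL blooms, in-memory, 8 KB blocks, plus the MultiRowMutationEndpoint coprocessor and the hbase.store.file-tracker.impl table value) can be expressed against the public HBase 2.x/3.x client API roughly as below. This is a hedged sketch, not the code the master runs; the table name "meta_like_demo" is purely illustrative, while the attribute values and property key are copied from the descriptor logged above.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLikeDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // Mirror the 'info' family attributes from the logged descriptor:
        // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8192)
            .build();

        // Table-level attributes taken from the logged descriptor.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("meta_like_demo"))
            .setColumnFamily(info)
            .setValue("hbase.store.file-tracker.impl", "DEFAULT")
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        System.out.println(td);
      }
    }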
2024-11-14T09:55:06,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:06,661 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:06,663 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:55:06,663 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527 2024-11-14T09:55:06,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:06,684 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:06,685 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:06,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:06,690 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:06,691 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,691 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:06,692 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:06,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:06,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:06,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:06,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:06,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:06,699 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:06,700 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:06,700 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:06,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:06,701 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:06,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740 2024-11-14T09:55:06,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740 2024-11-14T09:55:06,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:06,703 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:06,704 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
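The FlushLargeStoresPolicy message that closes the entry above documents its fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the table, the per-family flush lower bound becomes the region memstore flush size divided by the number of families, which is where the 32.0 M value for the four-family master:store region (flushSize=134217728 / 4) and the 16.0 M value for the four-family hbase:meta region come from. Below is a hedged sketch of pinning the bound explicitly on a table descriptor instead of relying on that fallback; the property key is taken verbatim from the log message, while the 16 MB value and table name are illustrative only.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Set the per-column-family flush lower bound directly on the table,
        // so FlushLargeStoresPolicy does not fall back to flushSize / #families.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16 * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }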
2024-11-14T09:55:06,706 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:06,708 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:06,709 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=875360, jitterRate=0.1130785346031189}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:06,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578106685Initializing all the Stores at 1731578106686 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578106686Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578106688 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578106688Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578106688Cleaning up temporary data from old regions at 1731578106703 (+15 ms)Region opened successfully at 1731578106710 (+7 ms) 2024-11-14T09:55:06,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:55:06,710 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:55:06,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:55:06,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:55:06,710 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:55:06,713 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:06,713 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578106710Disabling compacts and flushes for region at 1731578106710Disabling writes for close at 1731578106710Writing 
region close event to WAL at 1731578106713 (+3 ms)Closed at 1731578106713 2024-11-14T09:55:06,715 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:06,715 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:55:06,715 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:55:06,717 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:06,718 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(746): ClusterId : dc039e93-fb69-40e5-9d1d-7f8f7545f872 2024-11-14T09:55:06,718 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:55:06,718 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:55:06,732 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:55:06,732 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:55:06,742 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:55:06,743 DEBUG [RS:0;defc576eb6b7:37839 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4fa8e196, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:06,755 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:37839 2024-11-14T09:55:06,755 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:55:06,755 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:55:06,755 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:55:06,756 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,36445,1731578105529 with port=37839, startcode=1731578105754 2024-11-14T09:55:06,756 DEBUG [RS:0;defc576eb6b7:37839 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:55:06,759 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39791, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:55:06,759 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36445 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,759 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36445 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,762 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527 2024-11-14T09:55:06,762 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37757 2024-11-14T09:55:06,762 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:55:06,848 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:06,849 DEBUG [RS:0;defc576eb6b7:37839 {}] zookeeper.ZKUtil(111): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,849 WARN [RS:0;defc576eb6b7:37839 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:55:06,849 INFO [RS:0;defc576eb6b7:37839 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:06,849 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,853 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,37839,1731578105754] 2024-11-14T09:55:06,856 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:55:06,859 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:55:06,860 INFO [RS:0;defc576eb6b7:37839 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:55:06,860 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
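The MemStoreFlusher line above (globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M) is consistent with the usual defaults of 40% of heap for the global memstore limit and 95% of that limit for the low-water mark (836/880 = 0.95, implying roughly a 2.2 GB heap in this test JVM). A minimal sketch of the two knobs follows; the key names are the standard ones to the best of my knowledge and should be verified against the HBase version in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemstoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of heap the region server may spend on memstores.
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark, as a fraction of the global limit, where forced
        // flushing stops.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }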
2024-11-14T09:55:06,862 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:55:06,864 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:55:06,864 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,864 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,864 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,864 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:06,865 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:06,866 DEBUG [RS:0;defc576eb6b7:37839 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:06,868 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:55:06,869 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,869 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,869 WARN [defc576eb6b7:36445 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:55:06,869 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,869 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,869 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,37839,1731578105754-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:06,891 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:55:06,891 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,37839,1731578105754-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,891 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:06,891 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.Replication(171): defc576eb6b7,37839,1731578105754 started 2024-11-14T09:55:06,912 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
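The many "Chore ScheduledChore name=... is enabled" lines above (MemstoreFlusherChore at 1000 ms, BrokenStoreFileCleaner, HeapMemoryTunerChore, and so on) all use the same internal pattern: a ScheduledChore subclass registered with a ChoreService. The sketch below illustrates that pattern only; ScheduledChore and ChoreService are internal (IA.Private) classes, so the constructor signatures assumed here are not a supported public API.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        // A Stoppable that never stops, just to satisfy the chore constructor.
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped = false;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // Fire once a second, mirroring the period=1000 ms chores in the log.
        ScheduledChore chore = new ScheduledChore("demo-chore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(chore);
        Thread.sleep(3000);
        service.shutdown();
      }
    }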
2024-11-14T09:55:06,912 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,37839,1731578105754, RpcServer on defc576eb6b7/172.17.0.2:37839, sessionid=0x10138c50a4f0001 2024-11-14T09:55:06,913 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:55:06,913 DEBUG [RS:0;defc576eb6b7:37839 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,913 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,37839,1731578105754' 2024-11-14T09:55:06,913 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:55:06,913 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,37839,1731578105754 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,37839,1731578105754' 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:55:06,914 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:55:06,915 DEBUG [RS:0;defc576eb6b7:37839 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:55:06,915 INFO [RS:0;defc576eb6b7:37839 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:55:06,915 INFO [RS:0;defc576eb6b7:37839 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
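The two "Quota support disabled" lines that close the entry above correspond to the cluster-wide quota switch being left at its default of false. A one-line sketch of enabling it (normally done in hbase-site.xml; shown programmatically here for brevity) follows, after which the RPC and space quota managers would start on the region server.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Enable RPC and space quota support cluster-wide.
        conf.setBoolean("hbase.quota.enabled", true);
        System.out.println("hbase.quota.enabled="
            + conf.getBoolean("hbase.quota.enabled", false));
      }
    }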
2024-11-14T09:55:07,017 INFO [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C37839%2C1731578105754, suffix=, logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754, archiveDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs, maxLogs=32 2024-11-14T09:55:07,018 INFO [RS:0;defc576eb6b7:37839 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578107018 2024-11-14T09:55:07,027 INFO [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 2024-11-14T09:55:07,029 DEBUG [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-14T09:55:07,119 DEBUG [defc576eb6b7:36445 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:55:07,120 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:07,122 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,37839,1731578105754, state=OPENING 2024-11-14T09:55:07,194 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:55:07,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:07,225 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:07,226 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:07,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:07,226 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,37839,1731578105754}] 2024-11-14T09:55:07,226 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:07,381 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:55:07,384 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38145, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:55:07,388 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:55:07,388 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:07,390 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C37839%2C1731578105754.meta, suffix=.meta, logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754, archiveDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs, maxLogs=32 2024-11-14T09:55:07,391 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta 2024-11-14T09:55:07,396 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta 2024-11-14T09:55:07,399 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41273:41273),(127.0.0.1/127.0.0.1:40761:40761)] 2024-11-14T09:55:07,401 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:07,401 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:55:07,402 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:55:07,402 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
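The two "WAL configuration" lines above report blocksize=256 MB, rollsize=128 MB and maxLogs=32, which is consistent with rollsize = blocksize × logroll multiplier (256 MB × 0.5). The sketch below shows the configuration keys that usually drive those values; the key names are assumed from common HBase defaults and should be checked against the release in use.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; if unset HBase typically derives it from the
        // filesystem block size.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll the WAL once it reaches this fraction of the block size.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Maximum number of WAL files before forcing flushes.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.maxlogs"));
      }
    }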
2024-11-14T09:55:07,402 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:55:07,402 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:07,402 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:55:07,402 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:55:07,404 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:07,408 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:07,408 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:07,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:07,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:07,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:07,410 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:07,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:07,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:07,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:07,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:07,412 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:07,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:07,413 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:07,413 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:07,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
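The CompactionConfiguration lines repeated above for every store (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0) map onto a small set of store-level tuning keys. The sketch below names the keys I believe correspond to those values; treat the key names as assumptions to verify per release rather than a definitive reference.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Files below this size are always eligible for minor compaction.
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);
        // Minimum and maximum number of files per minor compaction.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        // Selection ratios for normal and off-peak hours.
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }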
2024-11-14T09:55:07,414 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:07,415 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740 2024-11-14T09:55:07,416 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740 2024-11-14T09:55:07,418 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:07,418 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:07,418 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-14T09:55:07,420 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:07,421 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821708, jitterRate=0.044856712222099304}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:07,421 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:55:07,422 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578107402Writing region info on filesystem at 1731578107402Initializing all the Stores at 1731578107404 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578107404Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578107404Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578107404Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578107404Cleaning up temporary data from old regions at 1731578107418 (+14 ms)Running coprocessor post-open hooks at 1731578107421 (+3 ms)Region opened successfully at 1731578107422 (+1 ms) 2024-11-14T09:55:07,424 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578107381 2024-11-14T09:55:07,427 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:55:07,427 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:55:07,428 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:07,430 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,37839,1731578105754, state=OPEN 2024-11-14T09:55:07,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:07,464 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:07,464 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:07,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:07,465 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:07,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:55:07,468 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,37839,1731578105754 in 239 msec 2024-11-14T09:55:07,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:55:07,472 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 753 msec 2024-11-14T09:55:07,474 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:07,474 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:55:07,475 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:07,475 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,37839,1731578105754, seqNum=-1] 2024-11-14T09:55:07,476 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:07,477 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60403, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:07,484 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 850 msec 2024-11-14T09:55:07,484 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578107484, completionTime=-1 2024-11-14T09:55:07,484 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:55:07,484 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578167486 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578227486 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,487 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:36445, period=300000, unit=MILLISECONDS is enabled. 
2024-11-14T09:55:07,487 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,487 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,489 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.635sec 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:07,492 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:55:07,495 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:55:07,495 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:55:07,495 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36445,1731578105529-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
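At this point the master reports initialization complete and InitMetaProcedure has created the 'default' and 'hbase' namespaces. A hypothetical sanity check using the public Admin API (standard client classes, not code from this test) could list them to confirm the master is serving requests:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class NamespaceCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // InitMetaProcedure creates 'default' and 'hbase'; listing them is a cheap
      // check that master initialization really finished.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
    }
  }
}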
2024-11-14T09:55:07,520 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@18dbfa70, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:07,520 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,36445,-1 for getting cluster id 2024-11-14T09:55:07,520 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:55:07,522 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dc039e93-fb69-40e5-9d1d-7f8f7545f872' 2024-11-14T09:55:07,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:55:07,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dc039e93-fb69-40e5-9d1d-7f8f7545f872" 2024-11-14T09:55:07,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781cdd7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:07,523 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,36445,-1] 2024-11-14T09:55:07,524 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:55:07,524 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:07,526 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38542, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:55:07,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15fefef2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:07,527 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:07,528 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,37839,1731578105754, seqNum=-1] 2024-11-14T09:55:07,528 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:07,530 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41522, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:07,532 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,36445,1731578105529 2024-11-14T09:55:07,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:07,536 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:55:07,554 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:55:07,554 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:07,555 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35821 2024-11-14T09:55:07,557 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35821 connecting to ZooKeeper ensemble=127.0.0.1:59567 2024-11-14T09:55:07,558 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:07,560 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:07,583 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:358210x0, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:07,584 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:358210x0, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-14T09:55:07,584 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35821-0x10138c50a4f0002 connected 2024-11-14T09:55:07,584 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-14T09:55:07,585 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:55:07,590 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:55:07,591 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:55:07,592 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:07,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35821 2024-11-14T09:55:07,596 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35821 2024-11-14T09:55:07,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35821 2024-11-14T09:55:07,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35821 2024-11-14T09:55:07,597 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35821 2024-11-14T09:55:07,598 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(746): ClusterId : dc039e93-fb69-40e5-9d1d-7f8f7545f872 2024-11-14T09:55:07,598 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:55:07,605 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:55:07,606 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:55:07,616 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:55:07,616 DEBUG [RS:1;defc576eb6b7:35821 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280686c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:07,647 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;defc576eb6b7:35821 2024-11-14T09:55:07,647 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:55:07,647 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:55:07,647 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(832): About to register with Master. 
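The RpcExecutor lines show the new region server coming up with only three default handlers, a typical minicluster-sized setting. If one wanted to reproduce that handler count outside the test, the commonly used key is hbase.regionserver.handler.count; a one-line sketch (the wrapper class is purely illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HandlerCount {
  public static Configuration lowHandlerCount() {
    Configuration conf = HBaseConfiguration.create();
    // Matches handlerCount=3 printed for default.FPBQ.Fifo above.
    conf.setInt("hbase.regionserver.handler.count", 3);
    return conf;
  }
}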
2024-11-14T09:55:07,648 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,36445,1731578105529 with port=35821, startcode=1731578107553 2024-11-14T09:55:07,649 DEBUG [RS:1;defc576eb6b7:35821 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:55:07,651 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49851, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:55:07,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36445 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,651 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=36445 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,653 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527 2024-11-14T09:55:07,653 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37757 2024-11-14T09:55:07,653 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:55:07,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:07,668 DEBUG [RS:1;defc576eb6b7:35821 {}] zookeeper.ZKUtil(111): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,668 WARN [RS:1;defc576eb6b7:35821 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:55:07,668 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,35821,1731578107553] 2024-11-14T09:55:07,668 INFO [RS:1;defc576eb6b7:35821 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:07,668 DEBUG [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,672 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:55:07,682 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:55:07,682 INFO [RS:1;defc576eb6b7:35821 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:55:07,682 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:55:07,683 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:55:07,684 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:55:07,684 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,684 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,685 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,685 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,685 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:07,685 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:07,685 DEBUG [RS:1;defc576eb6b7:35821 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:07,685 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
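RS:1;defc576eb6b7:35821 is a second region server being brought up inside the same minicluster (the test utility reports "Started new server=Thread[RS:1;...]" shortly after). Assuming the HBase 2.x testing API names (the log's HBaseTestingUtil is the renamed successor, so treat the exact class names as an assumption), adding such an extra server typically looks like:

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.MiniHBaseCluster;

public class ExtraRegionServer {
  public static void addOne(HBaseTestingUtility util) throws Exception {
    MiniHBaseCluster cluster = util.getHBaseCluster();
    // Starts one more HRegionServer thread (RS:1 in the log); it joins the same
    // ZooKeeper ensemble and reports for duty to the running master.
    cluster.startRegionServer();
  }
}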
2024-11-14T09:55:07,686 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,686 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,686 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,686 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,686 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,35821,1731578107553-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:07,702 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:55:07,702 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,35821,1731578107553-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,703 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,703 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.Replication(171): defc576eb6b7,35821,1731578107553 started 2024-11-14T09:55:07,717 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:07,717 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,35821,1731578107553, RpcServer on defc576eb6b7/172.17.0.2:35821, sessionid=0x10138c50a4f0002 2024-11-14T09:55:07,718 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:55:07,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;defc576eb6b7:35821,5,FailOnTimeoutGroup] 2024-11-14T09:55:07,718 DEBUG [RS:1;defc576eb6b7:35821 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,718 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,35821,1731578107553' 2024-11-14T09:55:07,718 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:55:07,718 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-14T09:55:07,718 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:55:07,718 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
defc576eb6b7,35821,1731578107553 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,35821,1731578107553' 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:55:07,719 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:55:07,720 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is defc576eb6b7,36445,1731578105529 2024-11-14T09:55:07,720 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37809924 2024-11-14T09:55:07,720 DEBUG [RS:1;defc576eb6b7:35821 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:55:07,720 INFO [RS:1;defc576eb6b7:35821 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:55:07,720 INFO [RS:1;defc576eb6b7:35821 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:55:07,720 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:55:07,722 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38548, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:55:07,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:55:07,722 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
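The two TableDescriptorChecker warnings show that the table about to be created uses a deliberately tiny region max file size (786432 bytes) and memstore flush size (8192 bytes), whether set on the descriptor or in the site configuration, so the test can trigger flushes and log rolls quickly. A hedged sketch of building an equivalent descriptor with the public client API (illustration only, not the test's actual code; the values simply restate the warnings):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.util.Bytes;

public class TinyFlushTable {
  public static TableDescriptor build() {
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath"))
        .setMaxFileSize(786432L)        // MAX_FILESIZE value the checker warns about
        .setMemStoreFlushSize(8192L)    // MEMSTORE_FLUSHSIZE value the checker warns about
        .setColumnFamily(ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(1)          // VERSIONS => '1' in the create request below
            .build())
        .build();
  }
}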
2024-11-14T09:55:07,723 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:55:07,724 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:55:07,726 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:55:07,726 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:07,726 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-14T09:55:07,727 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:55:07,728 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:55:07,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:55:07,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:55:07,741 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => a2dffcd02ef94776ff1269e1753400cb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527 2024-11-14T09:55:07,748 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37119 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:55:07,749 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45521 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:55:07,749 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:07,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing a2dffcd02ef94776ff1269e1753400cb, disabling compactions & flushes 2024-11-14T09:55:07,749 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:07,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:07,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. after waiting 0 ms 2024-11-14T09:55:07,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:07,749 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:07,749 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for a2dffcd02ef94776ff1269e1753400cb: Waiting for close lock at 1731578107749Disabling compacts and flushes for region at 1731578107749Disabling writes for close at 1731578107749Writing region close event to WAL at 1731578107749Closed at 1731578107749 2024-11-14T09:55:07,751 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:55:07,751 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1731578107751"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578107751"}]},"ts":"1731578107751"} 2024-11-14T09:55:07,754 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T09:55:07,755 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:55:07,756 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578107756"}]},"ts":"1731578107756"} 2024-11-14T09:55:07,758 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-14T09:55:07,759 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a2dffcd02ef94776ff1269e1753400cb, ASSIGN}] 2024-11-14T09:55:07,761 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a2dffcd02ef94776ff1269e1753400cb, ASSIGN 2024-11-14T09:55:07,762 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a2dffcd02ef94776ff1269e1753400cb, ASSIGN; state=OFFLINE, location=defc576eb6b7,37839,1731578105754; forceNewPlan=false, retain=false 2024-11-14T09:55:07,824 INFO [RS:1;defc576eb6b7:35821 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C35821%2C1731578107553, suffix=, logDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553, archiveDir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs, maxLogs=32 2024-11-14T09:55:07,825 INFO [RS:1;defc576eb6b7:35821 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C35821%2C1731578107553.1731578107825 2024-11-14T09:55:07,835 INFO [RS:1;defc576eb6b7:35821 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 2024-11-14T09:55:07,838 DEBUG [RS:1;defc576eb6b7:35821 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40761:40761),(127.0.0.1/127.0.0.1:41273:41273)] 2024-11-14T09:55:07,913 INFO [defc576eb6b7:36445 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
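The WAL created for defc576eb6b7,35821 uses blocksize=256 MB, rollsize=128 MB and maxLogs=32; the roll size is the block size scaled by a roll multiplier. A sketch of the configuration keys that commonly drive these numbers (standard hbase-site settings, but verify the key set against your HBase version; the values simply restate what the log prints):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollConfig {
  public static Configuration walTuned() {
    Configuration conf = HBaseConfiguration.create();
    // WAL block size on HDFS; the log shows 256 MB.
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll once a WAL reaches blocksize * multiplier (0.5 -> the 128 MB rollsize above).
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // Upper bound on outstanding WAL files before flushes are forced (maxLogs=32 above).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}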
2024-11-14T09:55:07,914 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a2dffcd02ef94776ff1269e1753400cb, regionState=OPENING, regionLocation=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:07,916 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a2dffcd02ef94776ff1269e1753400cb, ASSIGN because future has completed 2024-11-14T09:55:07,917 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2dffcd02ef94776ff1269e1753400cb, server=defc576eb6b7,37839,1731578105754}] 2024-11-14T09:55:08,066 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:55:08,067 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:08,074 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:08,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => a2dffcd02ef94776ff1269e1753400cb, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:08,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:08,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,078 INFO [StoreOpener-a2dffcd02ef94776ff1269e1753400cb-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,080 INFO [StoreOpener-a2dffcd02ef94776ff1269e1753400cb-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 
604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region a2dffcd02ef94776ff1269e1753400cb columnFamilyName info 2024-11-14T09:55:08,080 DEBUG [StoreOpener-a2dffcd02ef94776ff1269e1753400cb-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:08,081 INFO [StoreOpener-a2dffcd02ef94776ff1269e1753400cb-1 {}] regionserver.HStore(327): Store=a2dffcd02ef94776ff1269e1753400cb/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:08,082 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,083 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,084 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,085 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,085 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,087 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:08,087 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,089 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:08,090 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:08,090 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:08,091 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened a2dffcd02ef94776ff1269e1753400cb; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747516, jitterRate=-0.049485087394714355}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:55:08,091 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:08,092 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for a2dffcd02ef94776ff1269e1753400cb: Running coprocessor pre-open hook at 1731578108076Writing region info on filesystem at 1731578108076Initializing all the Stores at 1731578108077 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578108077Cleaning up temporary data from old regions at 1731578108085 (+8 ms)Running coprocessor post-open hooks at 1731578108091 (+6 ms)Region opened successfully at 1731578108092 (+1 ms) 2024-11-14T09:55:08,093 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb., pid=6, masterSystemTime=1731578108070 2024-11-14T09:55:08,096 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:08,096 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
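With region a2dffcd02ef94776ff1269e1753400cb open on defc576eb6b7,37839, the new table can serve client traffic. A minimal, illustrative put/get round-trip (row key and value are invented for the example; the table and family names follow the log):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class PutGetExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name)) {
      // Write one cell into the 'info' family created by CreateTableProcedure.
      Put put = new Put(Bytes.toBytes("row-0"));
      put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-0"));
      table.put(put);
      // Read it back to confirm the region is serving requests.
      Result result = table.get(new Get(Bytes.toBytes("row-0")));
      System.out.println(Bytes.toString(
          result.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"))));
    }
  }
}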
2024-11-14T09:55:08,098 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=a2dffcd02ef94776ff1269e1753400cb, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,37839,1731578105754 2024-11-14T09:55:08,101 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure a2dffcd02ef94776ff1269e1753400cb, server=defc576eb6b7,37839,1731578105754 because future has completed 2024-11-14T09:55:08,109 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:55:08,120 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure a2dffcd02ef94776ff1269e1753400cb, server=defc576eb6b7,37839,1731578105754 in 187 msec 2024-11-14T09:55:08,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:55:08,122 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=a2dffcd02ef94776ff1269e1753400cb, ASSIGN in 350 msec 2024-11-14T09:55:08,126 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:55:08,126 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578108126"}]},"ts":"1731578108126"} 2024-11-14T09:55:08,129 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-14T09:55:08,130 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:55:08,132 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 407 msec 2024-11-14T09:55:12,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:55:12,058 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:55:12,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:55:12,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-14T09:55:12,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:55:12,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:55:12,856 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-14T09:55:13,578 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:55:13,579 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:13,597 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:13,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:13,602 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:17,833 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=36445 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:55:17,833 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-14T09:55:17,833 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-14T09:55:17,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:55:17,838 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:17,851 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:17,854 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:17,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:17,855 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:17,855 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:55:17,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5c68a5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:17,856 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@13bc47a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:17,956 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@21371268{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-40851-hadoop-hdfs-3_4_1-tests_jar-_-any-13448726060314999324/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:17,957 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@138afca9{HTTP/1.1, (http/1.1)}{localhost:40851} 2024-11-14T09:55:17,957 INFO [Time-limited test {}] server.Server(415): Started @122592ms 2024-11-14T09:55:17,958 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:17,994 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:17,998 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:17,998 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:17,998 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:17,998 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:55:17,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@60d7a404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:17,999 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3727c2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:18,103 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b41f435{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-45681-hadoop-hdfs-3_4_1-tests_jar-_-any-2667613174065572854/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:18,103 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1fa52141{HTTP/1.1, (http/1.1)}{localhost:45681} 2024-11-14T09:55:18,103 INFO [Time-limited test {}] server.Server(415): Started @122738ms 2024-11-14T09:55:18,105 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:18,149 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:18,152 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:18,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:18,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:18,153 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:55:18,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@36e646c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:18,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43081444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:18,256 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4b81e014{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-36615-hadoop-hdfs-3_4_1-tests_jar-_-any-10049882499761780152/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:18,256 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33f41830{HTTP/1.1, (http/1.1)}{localhost:36615} 2024-11-14T09:55:18,256 INFO [Time-limited test {}] server.Server(415): Started @122891ms 2024-11-14T09:55:18,258 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:19,702 WARN [Thread-870 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data6/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,702 WARN [Thread-869 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data5/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,718 WARN [Thread-810 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:19,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc52aa73c934a396c with lease ID 0xa0ce174adeee56eb: Processing first storage report for DS-7f28020a-5b16-4253-b525-24e8a83eb3a8 from datanode DatanodeRegistration(127.0.0.1:45887, datanodeUuid=bc6f041d-988c-4993-b530-dd7887e1acb7, infoPort=39943, infoSecurePort=0, ipcPort=37853, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc52aa73c934a396c with lease ID 0xa0ce174adeee56eb: from storage DS-7f28020a-5b16-4253-b525-24e8a83eb3a8 node DatanodeRegistration(127.0.0.1:45887, datanodeUuid=bc6f041d-988c-4993-b530-dd7887e1acb7, infoPort=39943, infoSecurePort=0, ipcPort=37853, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,721 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xc52aa73c934a396c with lease ID 0xa0ce174adeee56eb: Processing first storage report for DS-ca7e1f8b-c820-4126-aee2-932f0a49559a from datanode DatanodeRegistration(127.0.0.1:45887, datanodeUuid=bc6f041d-988c-4993-b530-dd7887e1acb7, infoPort=39943, infoSecurePort=0, ipcPort=37853, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,721 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc52aa73c934a396c with lease ID 0xa0ce174adeee56eb: from storage DS-ca7e1f8b-c820-4126-aee2-932f0a49559a node DatanodeRegistration(127.0.0.1:45887, datanodeUuid=bc6f041d-988c-4993-b530-dd7887e1acb7, infoPort=39943, infoSecurePort=0, ipcPort=37853, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,811 WARN [Thread-881 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data8/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,810 WARN [Thread-880 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data7/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,840 WARN [Thread-832 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:19,843 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa66fb336948fabd8 with lease ID 0xa0ce174adeee56ec: Processing first storage report for DS-2848e0e3-a691-415c-a192-340088078901 from datanode DatanodeRegistration(127.0.0.1:38169, datanodeUuid=a6d07193-06bd-4bf6-84ed-e9e54517daa9, infoPort=39569, infoSecurePort=0, ipcPort=34141, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa66fb336948fabd8 with lease ID 0xa0ce174adeee56ec: from storage DS-2848e0e3-a691-415c-a192-340088078901 node DatanodeRegistration(127.0.0.1:38169, datanodeUuid=a6d07193-06bd-4bf6-84ed-e9e54517daa9, infoPort=39569, infoSecurePort=0, ipcPort=34141, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xa66fb336948fabd8 with lease ID 0xa0ce174adeee56ec: Processing first storage report for DS-2c948cdf-5174-45a3-b0cf-d74abda98c4e from datanode DatanodeRegistration(127.0.0.1:38169, datanodeUuid=a6d07193-06bd-4bf6-84ed-e9e54517daa9, infoPort=39569, infoSecurePort=0, ipcPort=34141, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,844 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa66fb336948fabd8 with lease ID 0xa0ce174adeee56ec: from storage DS-2c948cdf-5174-45a3-b0cf-d74abda98c4e node DatanodeRegistration(127.0.0.1:38169, datanodeUuid=a6d07193-06bd-4bf6-84ed-e9e54517daa9, infoPort=39569, infoSecurePort=0, ipcPort=34141, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,875 WARN [Thread-892 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,875 WARN [Thread-891 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9/current/BP-1062630699-172.17.0.2-1731578103400/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:19,897 WARN [Thread-854 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:19,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cb7212feb6dbad0 with lease ID 0xa0ce174adeee56ed: Processing first storage report for DS-bc57484a-14f6-4106-8149-8f45493364a3 from datanode DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cb7212feb6dbad0 with lease ID 0xa0ce174adeee56ed: from storage DS-bc57484a-14f6-4106-8149-8f45493364a3 node DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5cb7212feb6dbad0 with lease ID 0xa0ce174adeee56ed: Processing first storage report for DS-35b38342-8dcc-4755-9e38-781db92b8b7b from datanode DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400) 2024-11-14T09:55:19,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5cb7212feb6dbad0 with lease ID 0xa0ce174adeee56ed: from storage DS-35b38342-8dcc-4755-9e38-781db92b8b7b node DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:55:19,996 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:19,996 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:19,996 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:19,997 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:19,997 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:19,997 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:19,997 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 
2024-11-14T09:55:19,997 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta block BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:19,997 WARN [PacketResponder: BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:37119] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-33756186_22 at /127.0.0.1:33160 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33160 dst: /127.0.0.1:45521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302789993_22 at /127.0.0.1:38556 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38556 dst: /127.0.0.1:37119 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33130 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33130 dst: /127.0.0.1:45521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302789993_22 at /127.0.0.1:33102 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33102 dst: /127.0.0.1:45521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:38594 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38594 dst: /127.0.0.1:37119 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:19,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-33756186_22 at /127.0.0.1:38616 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:37119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38616 dst: /127.0.0.1:37119 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:19,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33142 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33142 dst: /127.0.0.1:45521 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:19,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:38590 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37119:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38590 dst: /127.0.0.1:37119 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:20,001 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7246d80e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:20,002 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5be1ef63{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:20,002 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:20,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ff5e5af{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:20,002 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@536f5169{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:20,003 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:20,003 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:20,003 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid 0be7c2c7-e088-4f1f-9297-63c81f0708a9) service to localhost/127.0.0.1:37757 2024-11-14T09:55:20,003 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:20,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:20,004 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:20,004 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:20,006 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1c728f32{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:20,008 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ee92458{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:20,008 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:20,008 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3dbb786f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:20,009 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3acea222{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:20,014 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,014 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta block BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,016 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@2dbb0c59 {}] datanode.DataXceiver(331): 127.0.0.1:45521:DataXceiver error processing unknown operation src: /127.0.0.1:48188 dst: /127.0.0.1:45521 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:20,017 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:20,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:20,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:20,017 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid f8c92a6c-2ed8-4adf-bf4a-cfedcd11fcc3) service to localhost/127.0.0.1:37757 2024-11-14T09:55:20,017 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data1/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:20,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data2/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:20,018 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:20,022 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb., hostname=defc576eb6b7,37839,1731578105754, seqNum=2] 2024-11-14T09:55:20,024 ERROR [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527-prefix:defc576eb6b7,37839,1731578105754 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,024 WARN [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527-prefix:defc576eb6b7,37839,1731578105754 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,025 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,025 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C37839%2C1731578105754:(num 1731578107018) roll requested 2024-11-14T09:55:20,025 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578120025 2024-11-14T09:55:20,029 WARN [Thread-903 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741838_1018 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:20,029 WARN [Thread-903 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:20,029 WARN [Thread-903 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741838_1018 2024-11-14T09:55:20,031 WARN [Thread-903 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:20,041 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:20,041 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:20,041 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:20,041 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:20,042 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:20,042 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 2024-11-14T09:55:20,044 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:20,045 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:20,046 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-14T09:55:20,046 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-14T09:55:20,046 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 2024-11-14T09:55:20,046 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39569:39569),(127.0.0.1/127.0.0.1:39943:39943)] 2024-11-14T09:55:20,046 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:20,050 WARN [IPC Server handler 0 on default port 37757 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741833_1009 2024-11-14T09:55:20,054 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 after 5ms 2024-11-14T09:55:21,686 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:21,921 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:22,047 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:22,048 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 2024-11-14T09:55:22,049 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:22,049 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:22,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34562 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:38169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34562 dst: /127.0.0.1:38169 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:22,050 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:48154 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1019] {}] datanode.DataXceiver(331): 127.0.0.1:45887:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48154 dst: /127.0.0.1:45887 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:22,114 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b41f435{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:22,115 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1fa52141{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:22,115 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:22,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3727c2a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:22,115 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@60d7a404{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:22,117 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:22,117 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:22,117 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:22,117 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid a6d07193-06bd-4bf6-84ed-e9e54517daa9) service to localhost/127.0.0.1:37757 2024-11-14T09:55:22,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data7/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:22,117 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data8/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:22,117 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:23,686 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:23,922 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:24,048 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]] 2024-11-14T09:55:24,049 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:24,049 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C37839%2C1731578105754:(num 1731578120025) roll requested 2024-11-14T09:55:24,050 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578124049 2024-11-14T09:55:24,056 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 after 4010ms 2024-11-14T09:55:24,059 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:24,059 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:24,059 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741840_1022 2024-11-14T09:55:24,060 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:24,064 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1023 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:24,064 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60386 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data6]'}, localName='127.0.0.1:45887', datanodeUuid='bc6f041d-988c-4993-b530-dd7887e1acb7', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:24,064 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:24,064 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60386 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023] {}] datanode.BlockReceiver(316): Block 1073741841 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:24,064 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023 2024-11-14T09:55:24,064 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60386 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:45887:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60386 dst: /127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:24,065 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:24,068 WARN [Thread-912 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1024 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37119 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:24,068 WARN [Thread-912 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:24,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33854 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024 to mirror 127.0.0.1:37119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:24,068 WARN [Thread-912 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024 2024-11-14T09:55:24,068 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33854 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024] {}] datanode.BlockReceiver(316): Block 1073741842 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:24,068 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33854 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741842_1024] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33854 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:24,069 WARN [Thread-912 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:24,074 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:24,074 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:24,074 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:24,074 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:24,074 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:24,075 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578124049 2024-11-14T09:55:24,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45887 is added to blk_1073741839_1021 (size=2431) 2024-11-14T09:55:24,077 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39943:39943),(127.0.0.1/127.0.0.1:33389:33389)] 2024-11-14T09:55:24,077 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:24,077 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 is not closed yet, will try archiving it next time 2024-11-14T09:55:24,122 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:55:24,478 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:25,687 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes 
[DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:25,729 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45887, datanodeUuid=bc6f041d-988c-4993-b530-dd7887e1acb7, infoPort=39943, infoSecurePort=0, ipcPort=37853, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741839_1021 to 127.0.0.1:37119 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:25,922 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,078 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,126 WARN [ResponseProcessor for block BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,127 WARN [DataStreamer for file /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578124049 block BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:26,128 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60400 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:45887:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60400 dst: /127.0.0.1:45887 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:26,128 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33860 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1025] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33860 dst: /127.0.0.1:35025 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:26,188 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@21371268{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:26,189 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@138afca9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:26,189 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:26,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@13bc47a2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:26,189 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5c68a5ac{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:26,191 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:26,191 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid bc6f041d-988c-4993-b530-dd7887e1acb7) service to localhost/127.0.0.1:37757 2024-11-14T09:55:26,191 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:55:26,191 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:26,191 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data5/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:26,191 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data6/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:26,192 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:26,202 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:26,202 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:55:26,222 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/cf141546b3c0414a9cdab486358fc3b3 is 1080, key is row0002/info:/1731578122119/Put/seqid=0 2024-11-14T09:55:26,223 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,224 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 
2024-11-14T09:55:26,224 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741844_1027 2024-11-14T09:55:26,224 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:26,225 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,225 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:26,225 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741845_1028 2024-11-14T09:55:26,226 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:26,227 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:26,227 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:26,227 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741846_1029 2024-11-14T09:55:26,228 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:26,229 WARN [Thread-926 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,229 WARN [Thread-926 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 
2024-11-14T09:55:26,229 WARN [Thread-926 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741847_1030 2024-11-14T09:55:26,230 WARN [Thread-926 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:26,230 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:26,230 WARN [IPC Server handler 0 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:26,230 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:26,233 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741848_1031 (size=10347) 2024-11-14T09:55:26,635 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/cf141546b3c0414a9cdab486358fc3b3 2024-11-14T09:55:26,645 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/cf141546b3c0414a9cdab486358fc3b3 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3 2024-11-14T09:55:26,652 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3, entries=5, sequenceid=11, filesize=10.1 K 2024-11-14T09:55:26,653 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for a2dffcd02ef94776ff1269e1753400cb in 451ms, sequenceid=11, compaction requested=false 2024-11-14T09:55:26,653 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:26,838 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:26,838 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-14T09:55:26,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c491676d49ef41eb960a84e7cc9b4ff9 is 1080, key is row0007/info:/1731578126204/Put/seqid=0 2024-11-14T09:55:26,847 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,847 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:26,848 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741849_1032 2024-11-14T09:55:26,848 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:26,849 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,849 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:26,849 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741850_1033 2024-11-14T09:55:26,850 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:26,851 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:26,851 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:26,851 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741851_1034 2024-11-14T09:55:26,851 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:26,853 WARN [Thread-931 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
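The flush above writes the memstore snapshot to a file under .tmp/ and only then commits it into the store's info/ directory, so a reader never observes a half-written HFile. A small self-contained sketch of that write-to-temp-then-rename pattern on a local filesystem follows; the paths and the flushSnapshot helper are invented for illustration, and HBase itself goes through the Hadoop FileSystem API rather than java.nio.

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Write-to-temp-then-rename commit, the same shape as flushing a memstore
    // snapshot into .tmp/ and then moving the finished file into the store directory.
    public class TmpCommitSketch {

        // Stand-in for serializing a memstore snapshot into an HFile.
        static void flushSnapshot(Path target, byte[] snapshot) throws IOException {
            Files.write(target, snapshot);
        }

        static Path flushAndCommit(Path storeDir, String fileName, byte[] snapshot) throws IOException {
            Path tmpDir = storeDir.resolve(".tmp");
            Files.createDirectories(tmpDir);
            Files.createDirectories(storeDir.resolve("info"));

            Path tmpFile = tmpDir.resolve(fileName);
            flushSnapshot(tmpFile, snapshot);                  // step 1: write under .tmp/

            Path committed = storeDir.resolve("info").resolve(fileName);
            // step 2: atomically move the finished file into place; readers only ever
            // see either "no file" or the complete file, never a partial flush.
            return Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);
        }

        public static void main(String[] args) throws IOException {
            Path storeDir = Files.createTempDirectory("store");
            Path hfile = flushAndCommit(storeDir, "cf141546b3c0414a9cdab486358fc3b3", "k=v".getBytes());
            System.out.println("committed " + hfile + " size=" + Files.size(hfile));
        }
    }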
2024-11-14T09:55:26,853 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33908 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035 to mirror 127.0.0.1:45521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:26,854 WARN [Thread-931 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:26,854 WARN [Thread-931 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035 2024-11-14T09:55:26,854 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33908 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:26,854 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33908 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33908 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:26,854 WARN [Thread-931 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:26,855 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:26,855 WARN [IPC Server handler 0 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:26,855 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:26,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741853_1036 (size=12506) 2024-11-14T09:55:27,259 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c491676d49ef41eb960a84e7cc9b4ff9 2024-11-14T09:55:27,268 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c491676d49ef41eb960a84e7cc9b4ff9 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9 2024-11-14T09:55:27,276 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9, entries=7, sequenceid=24, filesize=12.2 K 2024-11-14T09:55:27,277 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for a2dffcd02ef94776ff1269e1753400cb in 439ms, sequenceid=24, compaction requested=false 2024-11-14T09:55:27,277 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:27,277 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-14T09:55:27,277 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:27,278 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9 because midkey is the same as first or last row 2024-11-14T09:55:27,687 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:27,923 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,078 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]] 2024-11-14T09:55:28,078 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,078 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C37839%2C1731578105754:(num 1731578124049) roll requested 2024-11-14T09:55:28,079 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578128078 2024-11-14T09:55:28,081 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,081 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:28,081 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741854_1037 2024-11-14T09:55:28,082 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:28,083 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,083 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:28,083 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741855_1038 2024-11-14T09:55:28,083 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:28,084 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,084 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:28,084 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741856_1039 2024-11-14T09:55:28,085 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:28,086 WARN [Thread-936 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,086 WARN [Thread-936 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:28,086 WARN [Thread-936 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741857_1040 2024-11-14T09:55:28,086 WARN [Thread-936 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:28,087 WARN [IPC Server handler 1 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:28,087 WARN [IPC Server handler 1 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:28,087 WARN [IPC Server handler 1 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:28,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:28,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:28,090 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:28,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:28,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:28,091 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578124049 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578128078 2024-11-14T09:55:28,092 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33389:33389)] 
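The log-roller entries above show the trigger for the roll: the WAL's pipeline is down to 1 replica where at least 2 are expected, so a close is requested and a new WAL file is opened while the old ones queue for archiving. The toy sketch below models only that decision; WalWriter and the roll bookkeeping are assumptions, not the FSHLog API.

    // Toy model of rolling a write-ahead log when its replication drops below a minimum.
    // WalWriter is a stand-in for the real FSHLog writer; only the decision logic is shown.
    public class WalRollSketch {

        interface WalWriter {
            int currentPipelineSize();   // replicas currently acking writes
            void close();
            String name();
        }

        static final int MIN_REPLICAS = 2;
        private WalWriter current;
        private long rollCount = 0;

        WalRollSketch(WalWriter initial) {
            this.current = initial;
        }

        // Called periodically, like the log-roller thread in the entries above.
        void checkLowReplication() {
            int replicas = current.currentPipelineSize();
            if (replicas < MIN_REPLICAS) {
                System.out.printf("Found %d replicas but expecting no less than %d; rolling %s%n",
                    replicas, MIN_REPLICAS, current.name());
                rollWriter();
            }
        }

        void rollWriter() {
            current.close();             // old WAL becomes eligible for archiving
            current = newWriter();       // new file, new pipeline from the namenode
            rollCount++;
        }

        WalWriter newWriter() {
            long ts = System.currentTimeMillis();
            return new WalWriter() {
                public int currentPipelineSize() { return MIN_REPLICAS; } // fresh pipeline, assumed healthy
                public void close() {}
                public String name() { return "wal." + ts; }
            };
        }

        public static void main(String[] args) {
            WalWriter degraded = new WalWriter() {
                public int currentPipelineSize() { return 1; }  // only one live datanode left
                public void close() { System.out.println("closing degraded WAL"); }
                public String name() { return "wal.1731578124049"; }
            };
            WalRollSketch roller = new WalRollSketch(degraded);
            roller.checkLowReplication();
            System.out.println("rolls performed: " + roller.rollCount);
        }
    }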
2024-11-14T09:55:28,092 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:28,092 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578124049 is not closed yet, will try archiving it next time 2024-11-14T09:55:28,093 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578120025 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C37839%2C1731578105754.1731578120025 2024-11-14T09:55:28,093 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741843_1026 (size=25992) 2024-11-14T09:55:28,275 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:28,275 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:55:28,282 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/5354dc7687c940ff9cf7488c5e504ba0 is 1079, key is tmprow/info:/1731578128273/Put/seqid=0 2024-11-14T09:55:28,284 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,284 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 
2024-11-14T09:55:28,284 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741859_1042 2024-11-14T09:55:28,285 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:28,286 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,286 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:28,286 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741860_1043 2024-11-14T09:55:28,287 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:28,289 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37119 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
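The blk_1073741861_1044 failure above is the second recovery path: the first datanode accepted the write but could not reach its downstream mirror, so it returned an ERROR ack naming 127.0.0.1:37119 as firstBadLink, and the client excludes datanode 1 rather than datanode 0. A hypothetical sketch of mapping such an ack to the pipeline index to drop follows; the Ack type and badIndex helper are illustrative only.

    import java.util.List;

    // Deciding which pipeline node to exclude from a (hypothetical) block-op ack.
    // When the connection to node 0 itself fails there is no ack and index 0 is bad;
    // when node 0 reports a firstBadLink, the node at that address is the one to drop.
    public class FirstBadLinkSketch {

        record Ack(boolean success, String firstBadLink) {}

        static int badIndex(List<String> pipeline, Ack ack) {
            if (ack == null) {
                return 0;                               // no ack at all: the node we dialed is bad
            }
            if (ack.success()) {
                return -1;                              // nothing to exclude
            }
            if (ack.firstBadLink() != null && !ack.firstBadLink().isEmpty()) {
                int i = pipeline.indexOf(ack.firstBadLink());
                return i >= 0 ? i : 0;                  // exclude the reported downstream mirror
            }
            return 0;                                   // generic error from the first node
        }

        public static void main(String[] args) {
            List<String> pipeline = List.of("127.0.0.1:35025", "127.0.0.1:37119");
            // Mirrors the log above: 35025 answered, but reported 37119 as firstBadLink.
            Ack mirrorFailure = new Ack(false, "127.0.0.1:37119");
            System.out.println("exclude datanode index " + badIndex(pipeline, mirrorFailure)); // 1
            // And the plain "connection refused" case, where no ack ever arrives.
            System.out.println("exclude datanode index " + badIndex(pipeline, null));          // 0
        }
    }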
2024-11-14T09:55:28,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33940 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044 to mirror 127.0.0.1:37119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:28,289 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:28,289 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044 2024-11-14T09:55:28,289 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33940 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044] {}] datanode.BlockReceiver(316): Block 1073741861 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:28,289 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33940 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741861_1044] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33940 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:28,290 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:28,291 WARN [Thread-940 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,291 WARN [Thread-940 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 
2024-11-14T09:55:28,291 WARN [Thread-940 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741862_1045 2024-11-14T09:55:28,292 WARN [Thread-940 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:28,293 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:28,293 WARN [IPC Server handler 0 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:28,293 WARN [IPC Server handler 0 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:28,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741863_1046 (size=6027) 2024-11-14T09:55:28,495 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:28,697 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/5354dc7687c940ff9cf7488c5e504ba0 2024-11-14T09:55:28,703 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/5354dc7687c940ff9cf7488c5e504ba0 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0 2024-11-14T09:55:28,709 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0, entries=1, sequenceid=34, filesize=5.9 K 2024-11-14T09:55:28,710 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a2dffcd02ef94776ff1269e1753400cb in 435ms, sequenceid=34, compaction requested=true 2024-11-14T09:55:28,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:28,710 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-14T09:55:28,710 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:28,710 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9 because midkey is the same as first or last row 2024-11-14T09:55:28,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2dffcd02ef94776ff1269e1753400cb:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:55:28,711 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:28,711 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:55:28,712 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:55:28,712 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1541): a2dffcd02ef94776ff1269e1753400cb/info is initiating minor compaction (all files) 2024-11-14T09:55:28,712 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a2dffcd02ef94776ff1269e1753400cb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
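The split-policy lines above make two separate checks: the summed store size (28.2 K) exceeds the 16.0 K threshold, yet the split is refused because the candidate midkey equals the first or last row of the file. A toy version of that pair of checks is sketched below, with invented record types and example keys loosely based on the row names in this test; it is not the ConstantSizeRegionSplitPolicy code.

    import java.util.List;

    // Toy version of the two split checks visible above: the region is "big enough"
    // when the summed store file size exceeds a threshold, but the split is refused
    // when the largest file's midkey equals its first or last row key.
    public class SplitCheckSketch {

        record StoreFile(String firstKey, String midKey, String lastKey, long sizeBytes) {}

        static boolean shouldSplit(List<StoreFile> files, long sizeToCheckBytes) {
            long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
            if (sumSize <= sizeToCheckBytes) {
                return false;                           // not big enough yet
            }
            StoreFile largest = files.stream()
                .max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes()))
                .orElseThrow();
            // A usable split point must fall strictly inside the key range.
            if (largest.midKey().equals(largest.firstKey()) || largest.midKey().equals(largest.lastKey())) {
                System.out.println("cannot split: midkey is the same as first or last row");
                return false;
            }
            return true;
        }

        public static void main(String[] args) {
            List<StoreFile> files = List.of(
                new StoreFile("row0002", "row0002", "row0006", 10347),   // midkey == first row
                new StoreFile("row0007", "row0007", "row0013", 12506),
                new StoreFile("tmprow", "tmprow", "tmprow", 6027));
            // ~28.2 KB total versus a 16 KB check, as in the log, yet no usable split point.
            System.out.println("split? " + shouldSplit(files, 16 * 1024));
        }
    }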
2024-11-14T09:55:28,712 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0] into tmpdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp, totalSize=28.2 K 2024-11-14T09:55:28,713 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf141546b3c0414a9cdab486358fc3b3, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731578122119 2024-11-14T09:55:28,714 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting c491676d49ef41eb960a84e7cc9b4ff9, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1731578126204 2024-11-14T09:55:28,714 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5354dc7687c940ff9cf7488c5e504ba0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731578128273 2024-11-14T09:55:28,727 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2dffcd02ef94776ff1269e1753400cb#info#compaction#21 average throughput is 12.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:55:28,728 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/cf6a42327f644f649d47727fe2a29245 is 1080, key is row0002/info:/1731578122119/Put/seqid=0 2024-11-14T09:55:28,730 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
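The throughput line above reports the compaction running at about 12.31 MB/s against a 50 MB/s limit, with no sleeps needed. A minimal rate limiter in that spirit is sketched below; it simply sleeps after each chunk when the running average would exceed the cap, and all names and numbers are illustrative rather than the PressureAwareThroughputController implementation.

    // Minimal rate limiter: after each chunk, sleep just long enough to keep the
    // average throughput under a configured limit.
    public class ThroughputLimitSketch {

        private final double limitBytesPerSec;
        private long bytesWritten = 0;
        private final long startNanos = System.nanoTime();

        ThroughputLimitSketch(double limitBytesPerSec) {
            this.limitBytesPerSec = limitBytesPerSec;
        }

        // Call after writing chunkBytes; blocks if we are running ahead of the limit.
        void control(long chunkBytes) throws InterruptedException {
            bytesWritten += chunkBytes;
            double elapsedSec = (System.nanoTime() - startNanos) / 1e9;
            double allowedSec = bytesWritten / limitBytesPerSec;   // time this much data "should" take
            long sleepMs = (long) ((allowedSec - elapsedSec) * 1000);
            if (sleepMs > 0) {
                Thread.sleep(sleepMs);
            }
        }

        public static void main(String[] args) throws InterruptedException {
            ThroughputLimitSketch limiter = new ThroughputLimitSketch(50.0 * 1024 * 1024); // 50 MB/s cap
            long written = 0;
            for (int i = 0; i < 8; i++) {
                written += 4 * 1024 * 1024;            // pretend we wrote a 4 MB compaction chunk
                limiter.control(4 * 1024 * 1024);
            }
            double secs = (System.nanoTime() - limiter.startNanos) / 1e9;
            System.out.printf("wrote %d MB at ~%.1f MB/s%n",
                written / (1024 * 1024), written / (1024.0 * 1024) / secs);
        }
    }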
2024-11-14T09:55:28,730 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:28,730 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741864_1047 2024-11-14T09:55:28,731 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:28,732 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,732 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:28,732 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741865_1048 2024-11-14T09:55:28,733 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:28,735 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:28,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33962 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:28,735 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:28,735 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049 2024-11-14T09:55:28,735 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33962 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:28,735 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33962 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33962 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:28,735 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:28,738 WARN [Thread-947 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45887 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:28,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33974 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050 to mirror 127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:28,738 WARN [Thread-947 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:28,738 WARN [Thread-947 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050 2024-11-14T09:55:28,738 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33974 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050] {}] datanode.BlockReceiver(316): Block 1073741867 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:28,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:33974 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741867_1050] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33974 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:28,738 WARN [Thread-947 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:28,739 WARN [IPC Server handler 2 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:28,739 WARN [IPC Server handler 2 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:28,739 WARN [IPC Server handler 2 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:28,742 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741868_1051 (size=17994) 2024-11-14T09:55:28,904 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3141a6d5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741848_1031 to 127.0.0.1:45521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:28,904 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741853_1036 to 127.0.0.1:45887 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,153 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/cf6a42327f644f649d47727fe2a29245 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 2024-11-14T09:55:29,162 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a2dffcd02ef94776ff1269e1753400cb/info of a2dffcd02ef94776ff1269e1753400cb into cf6a42327f644f649d47727fe2a29245(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:29,163 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb., storeName=a2dffcd02ef94776ff1269e1753400cb/info, priority=13, startTime=1731578128710; duration=0sec 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 because midkey is the same as first or last row 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 because midkey is the same as first or last row 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 because midkey is the same as first or last row 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:29,163 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2dffcd02ef94776ff1269e1753400cb:info 2024-11-14T09:55:29,688 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:29,699 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:29,699 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:55:29,703 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/ed35f13c34a04478ab7d89d9f8ad30f0 is 1079, key is tmprow/info:/1731578129698/Put/seqid=0 2024-11-14T09:55:29,704 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:29,705 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:29,705 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741869_1052 2024-11-14T09:55:29,705 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:29,707 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:29,707 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:29,707 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741870_1053 2024-11-14T09:55:29,708 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:29,711 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37119 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:29,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34004 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054 to mirror 127.0.0.1:37119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,711 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:29,711 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054 2024-11-14T09:55:29,711 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34004 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054] {}] datanode.BlockReceiver(316): Block 1073741871 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:29,711 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34004 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741871_1054] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34004 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,712 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:29,714 WARN [Thread-956 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:29,714 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34006 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055 to mirror 127.0.0.1:45521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,714 WARN [Thread-956 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:29,714 WARN [Thread-956 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055 2024-11-14T09:55:29,714 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34006 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:29,715 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34006 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34006 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,715 WARN [Thread-956 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:29,716 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:29,716 WARN [IPC Server handler 3 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:29,716 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:29,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741873_1056 (size=6027) 2024-11-14T09:55:29,905 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741843_1026 to 127.0.0.1:45887 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:29,923 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,093 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]] 2024-11-14T09:55:30,093 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,093 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C37839%2C1731578105754:(num 1731578128078) roll requested 2024-11-14T09:55:30,093 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578130093 2024-11-14T09:55:30,097 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,097 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:30,097 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741874_1057 2024-11-14T09:55:30,098 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:30,100 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,100 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:30,100 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741875_1058 2024-11-14T09:55:30,101 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:30,101 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,102 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:30,102 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741876_1059 2024-11-14T09:55:30,102 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:30,104 WARN [Thread-962 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:30,104 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34020 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060 to mirror 127.0.0.1:45521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:30,105 WARN [Thread-962 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:30,105 WARN [Thread-962 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060 2024-11-14T09:55:30,105 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34020 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:30,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34020 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34020 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:30,105 WARN [Thread-962 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:30,106 WARN [IPC Server handler 1 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:30,106 WARN [IPC Server handler 1 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:30,106 WARN [IPC Server handler 1 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:30,109 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:30,109 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:30,109 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:30,109 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:30,109 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:30,109 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578128078 with entries=14, filesize=12.92 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578130093 2024-11-14T09:55:30,110 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33389:33389)] 2024-11-14T09:55:30,110 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:30,110 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578128078 is not closed yet, will try archiving it next time 2024-11-14T09:55:30,110 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578124049 to 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C37839%2C1731578105754.1731578124049 2024-11-14T09:55:30,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741858_1041 (size=13234) 2024-11-14T09:55:30,120 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/ed35f13c34a04478ab7d89d9f8ad30f0 2024-11-14T09:55:30,127 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/ed35f13c34a04478ab7d89d9f8ad30f0 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0 2024-11-14T09:55:30,134 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0, entries=1, sequenceid=45, filesize=5.9 K 2024-11-14T09:55:30,135 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a2dffcd02ef94776ff1269e1753400cb in 436ms, sequenceid=45, compaction requested=false 2024-11-14T09:55:30,135 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:30,135 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-14T09:55:30,135 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:30,135 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 because midkey is the same as first or last row 2024-11-14T09:55:30,512 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 is not closed yet, will try archiving it next time 2024-11-14T09:55:31,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:31,119 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:55:31,128 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/f677b6520c8a47f098801e2f172cc069 is 1079, key is tmprow/info:/1731578131117/Put/seqid=0 2024-11-14T09:55:31,132 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,132 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34048 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,133 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 
2024-11-14T09:55:31,133 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34048 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:31,133 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062 2024-11-14T09:55:31,133 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34048 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34048 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,134 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:31,136 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,136 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 
2024-11-14T09:55:31,136 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741880_1063 2024-11-14T09:55:31,137 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:31,139 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45887 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,139 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34064 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064 to mirror 127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,140 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 
2024-11-14T09:55:31,140 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064 2024-11-14T09:55:31,140 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34064 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064] {}] datanode.BlockReceiver(316): Block 1073741881 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:31,140 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34064 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741881_1064] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34064 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,140 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:31,143 WARN [Thread-967 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37119 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:31,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34076 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065 to mirror 127.0.0.1:37119 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,143 WARN [Thread-967 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:31,143 WARN [Thread-967 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065 2024-11-14T09:55:31,143 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34076 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:31,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34076 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34076 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,144 WARN [Thread-967 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:31,145 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:31,145 WARN [IPC Server handler 3 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:31,145 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:31,149 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741883_1066 (size=6027) 2024-11-14T09:55:31,550 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/f677b6520c8a47f098801e2f172cc069 2024-11-14T09:55:31,557 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/f677b6520c8a47f098801e2f172cc069 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069 2024-11-14T09:55:31,563 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069, entries=1, sequenceid=55, filesize=5.9 K 2024-11-14T09:55:31,564 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for a2dffcd02ef94776ff1269e1753400cb in 446ms, sequenceid=55, compaction requested=true 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 because midkey is the same as first or last row 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2dffcd02ef94776ff1269e1753400cb:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:55:31,564 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:31,564 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:55:31,566 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:55:31,566 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1541): a2dffcd02ef94776ff1269e1753400cb/info is initiating minor compaction (all files) 2024-11-14T09:55:31,566 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a2dffcd02ef94776ff1269e1753400cb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
2024-11-14T09:55:31,566 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069] into tmpdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp, totalSize=29.3 K 2024-11-14T09:55:31,566 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting cf6a42327f644f649d47727fe2a29245, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731578122119 2024-11-14T09:55:31,567 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting ed35f13c34a04478ab7d89d9f8ad30f0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1731578129698 2024-11-14T09:55:31,567 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] compactions.Compactor(225): Compacting f677b6520c8a47f098801e2f172cc069, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731578131117 2024-11-14T09:55:31,582 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2dffcd02ef94776ff1269e1753400cb#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:55:31,583 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/303e053db9b14282b8307fbd3233a067 is 1080, key is row0002/info:/1731578122119/Put/seqid=0 2024-11-14T09:55:31,585 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:31,585 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:31,585 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741884_1067 2024-11-14T09:55:31,585 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:31,587 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,587 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:31,587 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741885_1068 2024-11-14T09:55:31,588 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:31,589 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,589 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]) is bad. 2024-11-14T09:55:31,589 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741886_1069 2024-11-14T09:55:31,589 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37119,DS-44d46d50-330e-43d6-8007-981da901f360,DISK] 2024-11-14T09:55:31,591 WARN [Thread-974 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,591 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34094 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070 to mirror 127.0.0.1:45521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,592 WARN [Thread-974 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:31,592 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34094 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:31,592 WARN [Thread-974 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070 2024-11-14T09:55:31,592 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:34094 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34094 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:31,592 WARN [Thread-974 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:31,593 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-14T09:55:31,593 WARN [IPC Server handler 3 on default port 37757 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-14T09:55:31,593 WARN [IPC Server handler 3 on default port 37757 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-14T09:55:31,596 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741888_1071 (size=18097) 2024-11-14T09:55:31,689 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:31,907 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3141a6d5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741868_1051 to 127.0.0.1:45521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,907 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741863_1046 to 127.0.0.1:37119 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:31,924 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:32,007 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/303e053db9b14282b8307fbd3233a067 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 2024-11-14T09:55:32,015 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a2dffcd02ef94776ff1269e1753400cb/info of a2dffcd02ef94776ff1269e1753400cb into 303e053db9b14282b8307fbd3233a067(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:55:32,015 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:32,015 INFO [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb., storeName=a2dffcd02ef94776ff1269e1753400cb/info, priority=13, startTime=1731578131564; duration=0sec 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 because midkey is the same as first or last row 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 because midkey is the same as first or last row 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 because midkey is the same as first or last row 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:32,016 DEBUG [RS:0;defc576eb6b7:37839-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2dffcd02ef94776ff1269e1753400cb:info 2024-11-14T09:55:32,111 WARN [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
2024-11-14T09:55:32,111 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:32,147 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:32,151 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:32,152 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:32,153 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:32,153 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:55:32,153 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7eb80544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:32,154 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6dce0e6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:32,246 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33459d{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/java.io.tmpdir/jetty-localhost-35219-hadoop-hdfs-3_4_1-tests_jar-_-any-7392648605495070780/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:32,246 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3478c7d1{HTTP/1.1, (http/1.1)}{localhost:35219} 2024-11-14T09:55:32,246 INFO [Time-limited test {}] server.Server(415): Started @136882ms 2024-11-14T09:55:32,248 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:32,660 WARN [Thread-993 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:55:32,667 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5aa146a27ecf89e8 with lease ID 0xa0ce174adeee56ee: from storage DS-44d46d50-330e-43d6-8007-981da901f360 node DatanodeRegistration(127.0.0.1:44627, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=37373, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:55:32,668 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5aa146a27ecf89e8 with lease ID 0xa0ce174adeee56ee: from storage DS-5d9650ef-a569-489f-ad99-20f4a3bde754 node DatanodeRegistration(127.0.0.1:44627, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=37373, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:32,906 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741873_1056 to 127.0.0.1:45521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:32,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741858_1041 (size=13234) 2024-11-14T09:55:33,689 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:33,924 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:34,111 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:34,907 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3141a6d5[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741883_1066 to 127.0.0.1:45887 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:34,908 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741888_1071 to 127.0.0.1:38169 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:35,506 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:55:35,690 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:35,924 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:36,112 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:36,651 ERROR [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData-prefix:defc576eb6b7,36445,1731578105529 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:36,651 WARN [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData-prefix:defc576eb6b7,36445,1731578105529 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:36,652 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C36445%2C1731578105529:(num 1731578106356) roll requested 2024-11-14T09:55:36,653 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C36445%2C1731578105529.1731578136652 2024-11-14T09:55:36,660 WARN [Thread-1014 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741889_1072 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:36,660 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302789993_22 at /127.0.0.1:41866 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:36,661 WARN [Thread-1014 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:36,661 WARN [Thread-1014 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072 2024-11-14T09:55:36,661 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_1302789993_22 at /127.0.0.1:41866 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072] {}] datanode.BlockReceiver(316): Block 1073741889 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:36,661 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1302789993_22 at /127.0.0.1:41866 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741889_1072] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:41866 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:36,662 WARN [Thread-1014 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:36,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:36,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:36,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:36,668 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:36,668 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:36,668 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578136652 2024-11-14T09:55:36,669 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:36,669 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:36,669 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 2024-11-14T09:55:36,669 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33389:33389),(127.0.0.1/127.0.0.1:37373:37373)] 2024-11-14T09:55:36,670 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 is not closed yet, will try archiving it next time 2024-11-14T09:55:36,670 WARN [IPC Server handler 4 on default port 37757 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 has not been closed. Lease recovery is in progress. RecoveryId = 1074 for block blk_1073741830_1006 2024-11-14T09:55:36,670 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 after 1ms 2024-11-14T09:55:37,691 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:38,112 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:39,691 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:40,113 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:40,672 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 after 4003ms 2024-11-14T09:55:41,691 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:42,113 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
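The RecoverLeaseFSUtils entries above show the WAL close path repeatedly asking the NameNode to recover the lease on the old master WAL before the file can be reopened or archived. A minimal sketch of the underlying HDFS client call follows, assuming hadoop-hdfs-client 3.4.x on the classpath and the NameNode URI hdfs://localhost:37757 taken from the log; the retry loop and the 4-second sleep are illustrative only and do not reproduce HBase's actual backoff inside RecoverLeaseFSUtils.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class WalLeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // NameNode URI and WAL path taken from the log lines above.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37757"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      Path wal = new Path("/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/"
          + "defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356");
      // recoverLease() returns true once the NameNode has closed the file and released the lease;
      // while recovery is still in progress it returns false, as in attempt=0 and attempt=1 above.
      boolean recovered = dfs.recoverLease(wal);
      while (!recovered) {
        Thread.sleep(4000L); // illustrative pause, roughly the gap between attempts seen in the log
        recovered = dfs.recoverLease(wal);
      }
    }
  }
}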
2024-11-14T09:55:42,684 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d0b04a3 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:45521,null,null]) java.net.ConnectException: Call From defc576eb6b7/172.17.0.2 to localhost:40099 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:55:42,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741833_1020 (size=455) 2024-11-14T09:55:43,081 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578107018 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C37839%2C1731578105754.1731578107018 2024-11-14T09:55:43,085 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578128078 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C37839%2C1731578105754.1731578128078 2024-11-14T09:55:43,665 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@7c0c2dac[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:44627, datanodeUuid=0be7c2c7-e088-4f1f-9297-63c81f0708a9, infoPort=37373, infoSecurePort=0, ipcPort=38739, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741833_1020 to 127.0.0.1:45521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:43,692 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:44,114 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:45,692 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:45,811 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.1731578145811 2024-11-14T09:55:45,816 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:45,816 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 
2024-11-14T09:55:45,816 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741891_1075 2024-11-14T09:55:45,817 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:45,818 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1076 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:45,818 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741892_1076 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:45,818 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741892_1076 2024-11-14T09:55:45,819 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:45,820 WARN [Thread-1025 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1077 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:45,821 WARN [Thread-1025 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741893_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK], DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:45,821 WARN [Thread-1025 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741893_1077 2024-11-14T09:55:45,821 WARN [Thread-1025 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:45,826 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:45,826 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:45,826 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:45,826 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:45,827 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:45,827 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578130093 with entries=13, filesize=11.81 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578145811 2024-11-14T09:55:45,829 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741878_1061 (size=12100) 2024-11-14T09:55:45,829 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33389:33389),(127.0.0.1/127.0.0.1:37373:37373)] 2024-11-14T09:55:45,829 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578130093 is not closed yet, will try archiving it next time 2024-11-14T09:55:45,837 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:45,837 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-14T09:55:45,846 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c76539503c15469498a712e37c7e4472 is 1080, key is row0013/info:/1731578145830/Put/seqid=0 2024-11-14T09:55:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741895_1079 (size=9267) 2024-11-14T09:55:45,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741895_1079 (size=9267) 2024-11-14T09:55:45,871 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), 
to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c76539503c15469498a712e37c7e4472 2024-11-14T09:55:45,880 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/c76539503c15469498a712e37c7e4472 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472 2024-11-14T09:55:45,887 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472, entries=4, sequenceid=66, filesize=9.0 K 2024-11-14T09:55:45,889 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for a2dffcd02ef94776ff1269e1753400cb in 52ms, sequenceid=66, compaction requested=false 2024-11-14T09:55:45,889 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:45,889 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-14T09:55:45,889 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:45,889 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 because midkey is the same as first or last row 2024-11-14T09:55:46,065 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=37839 {}] regionserver.HRegion(8855): Flush requested on a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:46,065 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing a2dffcd02ef94776ff1269e1753400cb 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-14T09:55:46,071 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/247b7ba4e329457c920fb216412abf71 is 1080, key is row0016/info:/1731578145839/Put/seqid=0 2024-11-14T09:55:46,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741896_1080 (size=13583) 2024-11-14T09:55:46,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741896_1080 (size=13583) 2024-11-14T09:55:46,077 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), 
to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/247b7ba4e329457c920fb216412abf71 2024-11-14T09:55:46,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/247b7ba4e329457c920fb216412abf71 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71 2024-11-14T09:55:46,091 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71, entries=8, sequenceid=78, filesize=13.3 K 2024-11-14T09:55:46,093 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9682, heapSize ~10.36 KB/10608, currentSize=0 B/0 for a2dffcd02ef94776ff1269e1753400cb in 27ms, sequenceid=78, compaction requested=true 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=40.0 K, sizeToCheck=16.0 K 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 because midkey is the same as first or last row 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store a2dffcd02ef94776ff1269e1753400cb:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:55:46,093 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:46,093 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:55:46,094 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 40947 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:55:46,095 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HStore(1541): a2dffcd02ef94776ff1269e1753400cb/info is initiating minor compaction (all files) 2024-11-14T09:55:46,095 INFO [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of a2dffcd02ef94776ff1269e1753400cb/info in TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
2024-11-14T09:55:46,095 INFO [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71] into tmpdir=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp, totalSize=40.0 K 2024-11-14T09:55:46,095 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] compactions.Compactor(225): Compacting 303e053db9b14282b8307fbd3233a067, keycount=12, bloomtype=ROW, size=17.7 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1731578122119 2024-11-14T09:55:46,096 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] compactions.Compactor(225): Compacting c76539503c15469498a712e37c7e4472, keycount=4, bloomtype=ROW, size=9.0 K, encoding=NONE, compression=NONE, seqNum=66, earliestPutTs=1731578131934 2024-11-14T09:55:46,096 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] compactions.Compactor(225): Compacting 247b7ba4e329457c920fb216412abf71, keycount=8, bloomtype=ROW, size=13.3 K, encoding=NONE, compression=NONE, seqNum=78, earliestPutTs=1731578145839 2024-11-14T09:55:46,107 INFO [RS:0;defc576eb6b7:37839-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): a2dffcd02ef94776ff1269e1753400cb#info#compaction#27 average throughput is 22.57 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:55:46,108 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/fcc3dd1dad4a4f4cae37bcd18646abe3 is 1080, key is row0002/info:/1731578122119/Put/seqid=0 2024-11-14T09:55:46,110 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1081 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
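The longCompactions entries above show the region server automatically selecting all three info store files (17.7 K + 9.0 K + 13.3 K, totalSize=40.0 K) of a2dffcd02ef94776ff1269e1753400cb for a minor compaction after the flushes. For reference, a hedged sketch of requesting a compaction explicitly through the public Admin API is shown below, assuming an hbase-client on the classpath and a reachable cluster; the table name is taken from the log, everything else is illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class CompactionRequestSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableName table = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
      // A minor compaction rewrites only the selected store files, as in the log above;
      // majorCompact() would rewrite every store file in each store of the table.
      admin.compact(table);
      // admin.majorCompact(table);
    }
  }
}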
2024-11-14T09:55:46,110 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59226 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4]'}, localName='127.0.0.1:44627', datanodeUuid='0be7c2c7-e088-4f1f-9297-63c81f0708a9', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,110 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:46,111 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081 2024-11-14T09:55:46,111 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59226 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081] {}] datanode.BlockReceiver(316): Block 1073741897 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:46,111 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59226 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741897_1081] {}] datanode.DataXceiver(331): 127.0.0.1:44627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59226 dst: /127.0.0.1:44627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,111 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:46,112 WARN [Thread-1047 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1082 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,112 WARN [Thread-1047 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741898_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:46,112 WARN [Thread-1047 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741898_1082 2024-11-14T09:55:46,113 WARN [Thread-1047 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:46,114 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-14T09:55:46,114 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
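The repeating pattern above (Exception in createBlockOutputStream, "datanode ... is bad", Abandoning, Excluding, then "All datanodes ... are bad. Aborting") is the DFSClient's write-pipeline recovery running out of healthy replicas after the test kills datanodes, which is what finally trips the LowReplication-Roller. A hedged sketch of the standard client-side settings that govern this behaviour follows, assuming Hadoop 3.4.x; the property names are stock HDFS configuration keys, while the values shown are illustrative for a small test pipeline rather than recommendations.

import org.apache.hadoop.conf.Configuration;

public class PipelineFailureConfSketch {
  // Returns a client Configuration that keeps writing on the surviving datanodes
  // instead of aborting when replacement datanodes cannot be found.
  public static Configuration writeClientConf() {
    Configuration conf = new Configuration();
    // Allow the client to try replacing a failed datanode in an open pipeline.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    // DEFAULT only replaces nodes when the pipeline is large enough; with a 2-node
    // pipeline, as in the log, repeated failures can still end in "all datanodes are bad".
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
    // Best-effort mode continues with the remaining replicas rather than failing the write.
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
    return conf;
  }
}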
2024-11-14T09:55:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741899_1083 (size=28989) 2024-11-14T09:55:46,117 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741899_1083 (size=28989) 2024-11-14T09:55:46,124 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/.tmp/info/fcc3dd1dad4a4f4cae37bcd18646abe3 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/fcc3dd1dad4a4f4cae37bcd18646abe3 2024-11-14T09:55:46,130 INFO [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in a2dffcd02ef94776ff1269e1753400cb/info of a2dffcd02ef94776ff1269e1753400cb into fcc3dd1dad4a4f4cae37bcd18646abe3(size=28.3 K), total size for store is 28.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:55:46,130 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for a2dffcd02ef94776ff1269e1753400cb: 2024-11-14T09:55:46,131 INFO [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb., storeName=a2dffcd02ef94776ff1269e1753400cb/info, priority=13, startTime=1731578146093; duration=0sec 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/fcc3dd1dad4a4f4cae37bcd18646abe3 because midkey is the same as first or last row 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/fcc3dd1dad4a4f4cae37bcd18646abe3 because midkey is the same as first or last row 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.3 K, sizeToCheck=16.0 K 2024-11-14T09:55:46,131 DEBUG 
[RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/fcc3dd1dad4a4f4cae37bcd18646abe3 because midkey is the same as first or last row 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:55:46,131 DEBUG [RS:0;defc576eb6b7:37839-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: a2dffcd02ef94776ff1269e1753400cb:info 2024-11-14T09:55:46,230 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.1731578130093 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C37839%2C1731578105754.1731578130093 2024-11-14T09:55:46,266 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:55:46,266 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:55:46,267 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at 
org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:46,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:46,267 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:46,267 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:55:46,268 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:55:46,268 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1609523067, stopped=false 2024-11-14T09:55:46,268 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,36445,1731578105529 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:46,307 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:46,308 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:55:46,308 INFO [Time-limited test {}] 
client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:55:46,309 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:46,309 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping 
rpc client 2024-11-14T09:55:46,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:46,309 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:46,310 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:46,310 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,37839,1731578105754' ***** 2024-11-14T09:55:46,310 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:55:46,310 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,35821,1731578107553' ***** 2024-11-14T09:55:46,310 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:55:46,310 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:55:46,311 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:55:46,311 INFO [RS:0;defc576eb6b7:37839 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:55:46,311 INFO [RS:0;defc576eb6b7:37839 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:55:46,311 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(3091): Received CLOSE for a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:46,311 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:55:46,311 INFO [RS:1;defc576eb6b7:35821 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:55:46,311 INFO [RS:1;defc576eb6b7:35821 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:55:46,312 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,35821,1731578107553 2024-11-14T09:55:46,312 INFO [RS:1;defc576eb6b7:35821 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:55:46,312 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:55:46,312 INFO [RS:1;defc576eb6b7:35821 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;defc576eb6b7:35821. 
2024-11-14T09:55:46,312 DEBUG [RS:1;defc576eb6b7:35821 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:46,312 DEBUG [RS:1;defc576eb6b7:35821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:46,312 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,37839,1731578105754 2024-11-14T09:55:46,312 INFO [RS:0;defc576eb6b7:37839 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:55:46,312 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,35821,1731578107553; all regions closed. 2024-11-14T09:55:46,312 INFO [RS:0;defc576eb6b7:37839 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:37839. 
2024-11-14T09:55:46,312 DEBUG [RS:0;defc576eb6b7:37839 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:55:46,312 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing a2dffcd02ef94776ff1269e1753400cb, disabling compactions & flushes 2024-11-14T09:55:46,312 DEBUG [RS:0;defc576eb6b7:37839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:46,313 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:46,313 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:55:46,313 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:46,313 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:55:46,313 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:55:46,313 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,313 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. after waiting 0 ms 2024-11-14T09:55:46,313 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:55:46,313 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
2024-11-14T09:55:46,313 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,313 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,313 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,313 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:55:46,313 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, a2dffcd02ef94776ff1269e1753400cb=TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.} 2024-11-14T09:55:46,314 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,314 DEBUG [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, a2dffcd02ef94776ff1269e1753400cb 2024-11-14T09:55:46,314 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:55:46,314 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71] to archive 2024-11-14T09:55:46,314 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:55:46,314 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:55:46,314 DEBUG 
[RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:55:46,314 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:55:46,314 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-14T09:55:46,315 ERROR [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527-prefix:defc576eb6b7,37839,1731578105754.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,315 WARN [FSHLog-0-hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527-prefix:defc576eb6b7,37839,1731578105754.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,315 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-14T09:55:46,315 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C37839%2C1731578105754.meta:.meta(num 1731578107391) roll requested 2024-11-14T09:55:46,315 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C37839%2C1731578105754.meta.1731578146315.meta 2024-11-14T09:55:46,317 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf141546b3c0414a9cdab486358fc3b3 2024-11-14T09:55:46,318 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c491676d49ef41eb960a84e7cc9b4ff9 2024-11-14T09:55:46,319 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/cf6a42327f644f649d47727fe2a29245 2024-11-14T09:55:46,320 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,320 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,320 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 2024-11-14T09:55:46,321 WARN [IPC Server handler 2 on default port 37757 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 has not been closed. Lease recovery is in progress. RecoveryId = 1084 for block blk_1073741837_1013 2024-11-14T09:55:46,321 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/5354dc7687c940ff9cf7488c5e504ba0 2024-11-14T09:55:46,321 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 after 1ms 2024-11-14T09:55:46,323 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/ed35f13c34a04478ab7d89d9f8ad30f0 2024-11-14T09:55:46,323 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1085 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45887 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,323 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60886 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085 to mirror 127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,324 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:46,324 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60886 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:46,324 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085 2024-11-14T09:55:46,324 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60886 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741900_1085] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60886 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,325 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:46,326 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/303e053db9b14282b8307fbd3233a067 2024-11-14T09:55:46,327 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/f677b6520c8a47f098801e2f172cc069 2024-11-14T09:55:46,328 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/c76539503c15469498a712e37c7e4472 2024-11-14T09:55:46,330 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/info/247b7ba4e329457c920fb216412abf71 2024-11-14T09:55:46,330 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] regionserver.HRegionServer(3532): Failed to 
report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=defc576eb6b7:36445 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-14T09:55:46,330 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [cf141546b3c0414a9cdab486358fc3b3=10347, c491676d49ef41eb960a84e7cc9b4ff9=12506, cf6a42327f644f649d47727fe2a29245=17994, 5354dc7687c940ff9cf7488c5e504ba0=6027, ed35f13c34a04478ab7d89d9f8ad30f0=6027, 303e053db9b14282b8307fbd3233a067=18097, f677b6520c8a47f098801e2f172cc069=6027, c76539503c15469498a712e37c7e4472=9267, 247b7ba4e329457c920fb216412abf71=13583] 2024-11-14T09:55:46,333 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45521 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,333 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59252 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4]'}, localName='127.0.0.1:44627', datanodeUuid='0be7c2c7-e088-4f1f-9297-63c81f0708a9', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086 to mirror 127.0.0.1:45521 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,333 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:46,333 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086 2024-11-14T09:55:46,333 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59252 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086] {}] datanode.BlockReceiver(316): Block 1073741901 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-14T09:55:46,333 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59252 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741901_1086] {}] datanode.DataXceiver(331): 127.0.0.1:44627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59252 dst: /127.0.0.1:44627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,334 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:46,336 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/default/TestLogRolling-testLogRollOnDatanodeDeath/a2dffcd02ef94776ff1269e1753400cb/recovered.edits/82.seqid, newMaxSeqId=82, maxSeqId=1 2024-11-14T09:55:46,337 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 
2024-11-14T09:55:46,337 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for a2dffcd02ef94776ff1269e1753400cb: Waiting for close lock at 1731578146312Running coprocessor pre-close hooks at 1731578146312Disabling compacts and flushes for region at 1731578146312Disabling writes for close at 1731578146313 (+1 ms)Writing region close event to WAL at 1731578146332 (+19 ms)Running coprocessor post-close hooks at 1731578146337 (+5 ms)Closed at 1731578146337 2024-11-14T09:55:46,337 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb. 2024-11-14T09:55:46,343 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,343 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,343 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,343 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,343 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,343 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578146315.meta 2024-11-14T09:55:46,344 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,344 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:46,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta 2024-11-14T09:55:46,345 WARN [IPC Server handler 3 on default port 37757 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta has not been closed. Lease recovery is in progress. RecoveryId = 1088 for block blk_1073741834_1010 2024-11-14T09:55:46,345 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta after 1ms 2024-11-14T09:55:46,346 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37373:37373),(127.0.0.1/127.0.0.1:33389:33389)] 2024-11-14T09:55:46,347 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta is not closed yet, will try archiving it next time 2024-11-14T09:55:46,362 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/info/71f0e4121a5a4a349283c29c17ca7b3d is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1731578107722.a2dffcd02ef94776ff1269e1753400cb./info:regioninfo/1731578108097/Put/seqid=0 2024-11-14T09:55:46,365 WARN [Thread-1063 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45887 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:55:46,365 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60912 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10]'}, localName='127.0.0.1:35025', datanodeUuid='b3b415c1-9ae9-4ede-a40c-9426a0a5d617', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089 to mirror 127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,365 WARN [Thread-1063 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:46,365 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60912 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:46,365 WARN [Thread-1063 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089 2024-11-14T09:55:46,365 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:60912 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:35025:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60912 dst: /127.0.0.1:35025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,366 WARN [Thread-1063 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:46,370 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741904_1090 (size=7089) 2024-11-14T09:55:46,372 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741904_1090 (size=7089) 2024-11-14T09:55:46,373 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/info/71f0e4121a5a4a349283c29c17ca7b3d 2024-11-14T09:55:46,395 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/ns/d93d84c629dd4502a20d4eb0be058201 is 43, key is default/ns:d/1731578107477/Put/seqid=0 2024-11-14T09:55:46,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741905_1091 (size=5153) 2024-11-14T09:55:46,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741905_1091 (size=5153) 2024-11-14T09:55:46,402 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/ns/d93d84c629dd4502a20d4eb0be058201 2024-11-14T09:55:46,431 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/table/d3da4625a118444bba4c9e6aa3528762 is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1731578108126/Put/seqid=0 2024-11-14T09:55:46,434 WARN [Thread-1076 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741906_1092 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:45887 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59298 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4]'}, localName='127.0.0.1:44627', datanodeUuid='0be7c2c7-e088-4f1f-9297-63c81f0708a9', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092 to mirror 127.0.0.1:45887 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,434 WARN [Thread-1076 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK]) is bad. 2024-11-14T09:55:46,434 WARN [Thread-1076 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092 2024-11-14T09:55:46,434 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59298 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092] {}] datanode.BlockReceiver(316): Block 1073741906 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:46,434 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59298 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741906_1092] {}] datanode.DataXceiver(331): 127.0.0.1:44627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59298 dst: /127.0.0.1:44627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,435 WARN [Thread-1076 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45887,DS-7f28020a-5b16-4253-b525-24e8a83eb3a8,DISK] 2024-11-14T09:55:46,436 WARN [Thread-1076 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,437 WARN [Thread-1076 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK], DatanodeInfoWithStorage[127.0.0.1:35025,DS-bc57484a-14f6-4106-8149-8f45493364a3,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK]) is bad. 2024-11-14T09:55:46,437 WARN [Thread-1076 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741907_1093 2024-11-14T09:55:46,437 WARN [Thread-1076 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:45521,DS-274ed8a0-d09f-4e56-9d9e-93609a04c8b2,DISK] 2024-11-14T09:55:46,440 WARN [Thread-1076 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741908_1094 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:38169 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:55:46,440 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59304 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4]'}, localName='127.0.0.1:44627', datanodeUuid='0be7c2c7-e088-4f1f-9297-63c81f0708a9', xmitsInProgress=0}:Exception transferring block BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094 to mirror 127.0.0.1:38169 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,440 WARN [Thread-1076 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094 in pipeline [DatanodeInfoWithStorage[127.0.0.1:44627,DS-44d46d50-330e-43d6-8007-981da901f360,DISK], DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK]) is bad. 2024-11-14T09:55:46,440 WARN [Thread-1076 {}] hdfs.DataStreamer(1850): Abandoning BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094 2024-11-14T09:55:46,440 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59304 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094] {}] datanode.BlockReceiver(316): Block 1073741908 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-14T09:55:46,440 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-947652036_22 at /127.0.0.1:59304 [Receiving block BP-1062630699-172.17.0.2-1731578103400:blk_1073741908_1094] {}] datanode.DataXceiver(331): 127.0.0.1:44627:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59304 dst: /127.0.0.1:44627 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:46,441 WARN [Thread-1076 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:38169,DS-2848e0e3-a691-415c-a192-340088078901,DISK] 2024-11-14T09:55:46,445 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741909_1095 (size=5424) 2024-11-14T09:55:46,446 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741909_1095 (size=5424) 2024-11-14T09:55:46,446 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/table/d3da4625a118444bba4c9e6aa3528762 2024-11-14T09:55:46,454 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/info/71f0e4121a5a4a349283c29c17ca7b3d as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/info/71f0e4121a5a4a349283c29c17ca7b3d 2024-11-14T09:55:46,462 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/info/71f0e4121a5a4a349283c29c17ca7b3d, entries=10, sequenceid=11, filesize=6.9 K 2024-11-14T09:55:46,464 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/ns/d93d84c629dd4502a20d4eb0be058201 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/ns/d93d84c629dd4502a20d4eb0be058201 2024-11-14T09:55:46,472 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/ns/d93d84c629dd4502a20d4eb0be058201, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:55:46,473 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/.tmp/table/d3da4625a118444bba4c9e6aa3528762 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/table/d3da4625a118444bba4c9e6aa3528762 2024-11-14T09:55:46,484 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/table/d3da4625a118444bba4c9e6aa3528762, entries=2, sequenceid=11, filesize=5.3 K 2024-11-14T09:55:46,486 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false 2024-11-14T09:55:46,495 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:55:46,495 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:55:46,496 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:46,496 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578146314Running coprocessor pre-close hooks at 1731578146314Disabling compacts and flushes for region at 1731578146314Disabling writes for close at 1731578146314Obtaining lock to block concurrent updates at 1731578146314Preparing flush snapshotting stores in 1588230740 at 1731578146314Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1731578146315 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731578146347 (+32 ms)Flushing 1588230740/info: creating writer at 1731578146347Flushing 1588230740/info: appending metadata at 1731578146362 (+15 ms)Flushing 1588230740/info: closing flushed file at 1731578146362Flushing 1588230740/ns: creating writer at 1731578146380 (+18 ms)Flushing 1588230740/ns: appending metadata at 1731578146395 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731578146395Flushing 1588230740/table: creating writer at 1731578146408 (+13 ms)Flushing 1588230740/table: appending metadata at 1731578146430 (+22 ms)Flushing 1588230740/table: closing flushed file at 1731578146430Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@41da59c1: reopening flushed file at 1731578146453 (+23 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@cc21992: reopening flushed file at 1731578146463 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5bbbef59: reopening flushed file at 1731578146472 (+9 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 172ms, sequenceid=11, compaction requested=false at 1731578146486 (+14 ms)Writing region close event to WAL at 
1731578146489 (+3 ms)Running coprocessor post-close hooks at 1731578146495 (+6 ms)Closed at 1731578146496 (+1 ms) 2024-11-14T09:55:46,496 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:46,514 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,37839,1731578105754; all regions closed. 2024-11-14T09:55:46,514 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,514 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,515 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,515 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,515 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:46,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741902_1087 (size=825) 2024-11-14T09:55:46,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741902_1087 (size=825) 2024-11-14T09:55:46,686 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:55:46,686 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:55:46,869 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:55:46,869 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:55:46,871 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:46,907 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@357e2425[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:35025, datanodeUuid=b3b415c1-9ae9-4ede-a40c-9426a0a5d617, infoPort=33389, infoSecurePort=0, ipcPort=41023, storageInfo=lv=-57;cid=testClusterID;nsid=1388361459;c=1731578103400):Failed to transfer BP-1062630699-172.17.0.2-1731578103400:blk_1073741878_1061 to 127.0.0.1:45521 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:47,499 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:55:47,499 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T09:55:47,688 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:49,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741836_1012 (size=76) 2024-11-14T09:55:50,323 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 after 4003ms 2024-11-14T09:55:50,346 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta after 4002ms 2024-11-14T09:55:50,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:50,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:51,321 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T09:55:51,324 DEBUG [RS:1;defc576eb6b7:35821 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs 2024-11-14T09:55:51,324 INFO [RS:1;defc576eb6b7:35821 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C35821%2C1731578107553:(num 1731578107825) 2024-11-14T09:55:51,325 DEBUG [RS:1;defc576eb6b7:35821 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:51,325 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:51,325 INFO [RS:1;defc576eb6b7:35821 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:55:51,325 INFO [RS:1;defc576eb6b7:35821 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:55:51,325 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:55:51,326 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:55:51,326 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:55:51,326 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:55:51,326 INFO [RS:1;defc576eb6b7:35821 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:55:51,326 INFO [RS:1;defc576eb6b7:35821 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35821 2024-11-14T09:55:51,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor105.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:51,338 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,351 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,352 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,368 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:51,369 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,35821,1731578107553 2024-11-14T09:55:51,369 INFO [RS:1;defc576eb6b7:35821 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:55:51,380 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,35821,1731578107553] 2024-11-14T09:55:51,390 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,35821,1731578107553 already deleted, retry=false 2024-11-14T09:55:51,390 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,35821,1731578107553 expired; onlineServers=1 2024-11-14T09:55:51,480 INFO [RS:1;defc576eb6b7:35821 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:55:51,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:51,480 INFO [RS:1;defc576eb6b7:35821 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,35821,1731578107553; zookeeper connection closed. 
2024-11-14T09:55:51,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35821-0x10138c50a4f0002, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:51,480 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@684f677d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@684f677d 2024-11-14T09:55:51,515 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-14T09:55:51,518 DEBUG [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs 2024-11-14T09:55:51,518 INFO [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C37839%2C1731578105754.meta:.meta(num 1731578146315) 2024-11-14T09:55:51,519 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,519 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,519 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,519 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,519 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741894_1078 (size=16308) 2024-11-14T09:55:51,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741894_1078 (size=16308) 2024-11-14T09:55:51,523 DEBUG [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs 2024-11-14T09:55:51,523 INFO [RS:0;defc576eb6b7:37839 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C37839%2C1731578105754:(num 1731578145811) 2024-11-14T09:55:51,523 DEBUG [RS:0;defc576eb6b7:37839 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:51,523 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:55:51,523 INFO [RS:0;defc576eb6b7:37839 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:55:51,523 INFO [RS:0;defc576eb6b7:37839 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:55:51,523 INFO [RS:0;defc576eb6b7:37839 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:55:51,523 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:55:51,524 INFO [RS:0;defc576eb6b7:37839 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:37839 2024-11-14T09:55:51,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:51,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,37839,1731578105754 2024-11-14T09:55:51,536 INFO [RS:0;defc576eb6b7:37839 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:55:51,548 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,37839,1731578105754] 2024-11-14T09:55:51,559 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,37839,1731578105754 already deleted, retry=false 2024-11-14T09:55:51,559 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,37839,1731578105754 expired; onlineServers=0 2024-11-14T09:55:51,559 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,36445,1731578105529' ***** 2024-11-14T09:55:51,559 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:55:51,559 INFO [M:0;defc576eb6b7:36445 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:55:51,559 INFO [M:0;defc576eb6b7:36445 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:55:51,559 DEBUG [M:0;defc576eb6b7:36445 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:55:51,560 DEBUG [M:0;defc576eb6b7:36445 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:55:51,560 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:55:51,560 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578106650 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578106650,5,FailOnTimeoutGroup] 2024-11-14T09:55:51,560 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578106651 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578106651,5,FailOnTimeoutGroup] 2024-11-14T09:55:51,560 INFO [M:0;defc576eb6b7:36445 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:55:51,560 INFO [M:0;defc576eb6b7:36445 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:55:51,560 DEBUG [M:0;defc576eb6b7:36445 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:55:51,560 INFO [M:0;defc576eb6b7:36445 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:55:51,561 INFO [M:0;defc576eb6b7:36445 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:55:51,561 INFO [M:0;defc576eb6b7:36445 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:55:51,561 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:55:51,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:55:51,569 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:51,569 DEBUG [M:0;defc576eb6b7:36445 {}] zookeeper.ZKUtil(347): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:55:51,570 WARN [M:0;defc576eb6b7:36445 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:55:51,571 INFO [M:0;defc576eb6b7:36445 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/.lastflushedseqids 2024-11-14T09:55:51,580 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741910_1096 (size=130) 2024-11-14T09:55:51,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741910_1096 (size=130) 2024-11-14T09:55:51,581 INFO [M:0;defc576eb6b7:36445 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:55:51,581 INFO [M:0;defc576eb6b7:36445 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:55:51,581 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:55:51,582 INFO [M:0;defc576eb6b7:36445 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:51,582 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:51,582 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:55:51,582 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:51,582 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-14T09:55:51,597 DEBUG [M:0;defc576eb6b7:36445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb3d015c5150424ea0a49a33a8973985 is 82, key is hbase:meta,,1/info:regioninfo/1731578107428/Put/seqid=0 2024-11-14T09:55:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741911_1097 (size=5672) 2024-11-14T09:55:51,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741911_1097 (size=5672) 2024-11-14T09:55:51,602 INFO [M:0;defc576eb6b7:36445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb3d015c5150424ea0a49a33a8973985 2024-11-14T09:55:51,622 DEBUG [M:0;defc576eb6b7:36445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/421e1682043e44ab91b160d35850176a is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731578108132/Put/seqid=0 2024-11-14T09:55:51,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741912_1098 (size=6255) 2024-11-14T09:55:51,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741912_1098 (size=6255) 2024-11-14T09:55:51,627 INFO [M:0;defc576eb6b7:36445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/421e1682043e44ab91b160d35850176a 2024-11-14T09:55:51,631 INFO [M:0;defc576eb6b7:36445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 421e1682043e44ab91b160d35850176a 2024-11-14T09:55:51,645 DEBUG [M:0;defc576eb6b7:36445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1f21a441e4841a49b6803001327a585 is 69, key is defc576eb6b7,35821,1731578107553/rs:state/1731578107652/Put/seqid=0 2024-11-14T09:55:51,648 
DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:51,648 INFO [RS:0;defc576eb6b7:37839 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:55:51,648 INFO [RS:0;defc576eb6b7:37839 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,37839,1731578105754; zookeeper connection closed. 2024-11-14T09:55:51,648 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:37839-0x10138c50a4f0001, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:51,649 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@760b35d9 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@760b35d9 2024-11-14T09:55:51,649 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-14T09:55:51,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741913_1099 (size=5224) 2024-11-14T09:55:51,649 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741913_1099 (size=5224) 2024-11-14T09:55:51,650 INFO [M:0;defc576eb6b7:36445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1f21a441e4841a49b6803001327a585 2024-11-14T09:55:51,674 DEBUG [M:0;defc576eb6b7:36445 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a888831b0b8a4272bceab577353e3cee is 52, key is load_balancer_on/state:d/1731578107535/Put/seqid=0 2024-11-14T09:55:51,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741914_1100 (size=5056) 2024-11-14T09:55:51,679 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741914_1100 (size=5056) 2024-11-14T09:55:51,679 INFO [M:0;defc576eb6b7:36445 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a888831b0b8a4272bceab577353e3cee 2024-11-14T09:55:51,685 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cb3d015c5150424ea0a49a33a8973985 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb3d015c5150424ea0a49a33a8973985 2024-11-14T09:55:51,690 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cb3d015c5150424ea0a49a33a8973985, entries=8, sequenceid=60, filesize=5.5 K 2024-11-14T09:55:51,691 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/421e1682043e44ab91b160d35850176a as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/421e1682043e44ab91b160d35850176a 2024-11-14T09:55:51,696 INFO [M:0;defc576eb6b7:36445 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 421e1682043e44ab91b160d35850176a 2024-11-14T09:55:51,696 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/421e1682043e44ab91b160d35850176a, entries=6, sequenceid=60, filesize=6.1 K 2024-11-14T09:55:51,697 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1f21a441e4841a49b6803001327a585 as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1f21a441e4841a49b6803001327a585 2024-11-14T09:55:51,702 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1f21a441e4841a49b6803001327a585, entries=2, sequenceid=60, filesize=5.1 K 2024-11-14T09:55:51,703 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/a888831b0b8a4272bceab577353e3cee as hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a888831b0b8a4272bceab577353e3cee 2024-11-14T09:55:51,708 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/a888831b0b8a4272bceab577353e3cee, entries=1, sequenceid=60, filesize=4.9 K 2024-11-14T09:55:51,710 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false 2024-11-14T09:55:51,711 INFO [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:55:51,711 DEBUG [M:0;defc576eb6b7:36445 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578151581Disabling compacts and flushes for region at 1731578151581Disabling writes for close at 1731578151582 (+1 ms)Obtaining lock to block concurrent updates at 1731578151582Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578151582Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1731578151582Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578151583 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578151583Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578151596 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578151596Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578151608 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578151621 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578151621Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578151632 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578151644 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578151644Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578151655 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578151673 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578151673Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ac65e51: reopening flushed file at 1731578151684 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@308044a8: reopening flushed file at 1731578151690 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@69a24a63: reopening flushed file at 1731578151696 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@eba92b4: reopening flushed file at 1731578151702 (+6 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 127ms, sequenceid=60, compaction requested=false at 1731578151710 (+8 ms)Writing region close event to WAL at 1731578151711 (+1 ms)Closed at 1731578151711 2024-11-14T09:55:51,712 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,716 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,716 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,716 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,716 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:55:51,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44627 is added to blk_1073741890_1073 (size=1045) 2024-11-14T09:55:51,718 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741890_1073 (size=1045) 2024-11-14T09:55:51,870 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:55:51,889 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,890 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:51,897 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:52,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-14T09:55:52,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:55:52,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:55:52,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:55:52,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:52,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:52,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741835_1011 (size=393) 2024-11-14T09:55:52,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:52,689 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@40f2a9e4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:45521,null,null]) java.net.ConnectException: Call From defc576eb6b7/172.17.0.2 to localhost:40099 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:55:53,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:55:53,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:55:53,667 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:53,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35025 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:53,682 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/WALs/defc576eb6b7,36445,1731578105529/defc576eb6b7%2C36445%2C1731578105529.1731578106356 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/oldWALs/defc576eb6b7%2C36445%2C1731578105529.1731578106356 2024-11-14T09:55:53,685 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/MasterData/oldWALs/defc576eb6b7%2C36445%2C1731578105529.1731578106356 to hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/oldWALs/defc576eb6b7%2C36445%2C1731578105529.1731578106356$masterlocalwal$ 2024-11-14T09:55:53,685 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:55:53,685 INFO [M:0;defc576eb6b7:36445 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:55:53,685 INFO [M:0;defc576eb6b7:36445 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36445 2024-11-14T09:55:53,685 INFO [M:0;defc576eb6b7:36445 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:55:53,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:53,859 INFO [M:0;defc576eb6b7:36445 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:55:53,859 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:36445-0x10138c50a4f0000, quorum=127.0.0.1:59567, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:55:53,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33459d{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:53,863 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3478c7d1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:53,863 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:53,863 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6dce0e6b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:53,864 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7eb80544{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:53,865 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@381fde50 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to 
recover block (block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:45521,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:40099 , LocalHost:localPort defc576eb6b7/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-14T09:55:53,865 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:53,865 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:53,865 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:53,865 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid 0be7c2c7-e088-4f1f-9297-63c81f0708a9) service to localhost/127.0.0.1:37757 2024-11-14T09:55:53,865 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@381fde50 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1062630699-172.17.0.2-1731578103400:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45521,null,null], DatanodeInfoWithStorage[127.0.0.1:44627,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-1062630699-172.17.0.2-1731578103400 2024-11-14T09:55:53,865 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@381fde50 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:44627,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1062630699-172.17.0.2-1731578103400 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:55:53,866 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@381fde50 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45521,null,null]) java.io.IOException: No block pool offer service for bpid=BP-1062630699-172.17.0.2-1731578103400 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:55:53,866 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data3/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:53,866 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@381fde50 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:44627,null,null], DatanodeInfoWithStorage[127.0.0.1:45521,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-1062630699-172.17.0.2-1731578103400:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:44627,null,null], DatanodeInfoWithStorage[127.0.0.1:45521,null,null]] 2024-11-14T09:55:53,866 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data4/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:53,866 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:53,868 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4b81e014{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:53,868 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33f41830{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:53,868 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:53,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43081444{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:53,869 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@36e646c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:53,870 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:55:53,870 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:55:53,870 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:55:53,870 WARN [BP-1062630699-172.17.0.2-1731578103400 heartbeating to localhost/127.0.0.1:37757 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1062630699-172.17.0.2-1731578103400 (Datanode Uuid b3b415c1-9ae9-4ede-a40c-9426a0a5d617) service to localhost/127.0.0.1:37757 2024-11-14T09:55:53,870 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data9/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:53,870 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/cluster_ec9cebd8-3a4c-42a3-6246-575dbe7e0d07/data/data10/current/BP-1062630699-172.17.0.2-1731578103400 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:55:53,871 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:55:53,875 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5ad1779c{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:55:53,876 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@d2dba01{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:55:53,876 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:55:53,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@77dbc458{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:55:53,876 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@237b8284{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir/,STOPPED} 2024-11-14T09:55:53,883 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:55:53,912 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:55:53,918 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath 
Thread=154 (was 78) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37757 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f84e8befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:37757 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f84e8befdc8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37757 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:37757 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37757 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:35719 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37757 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37757 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:35719 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:37757 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=452 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=144 (was 115) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=4426 (was 5308) 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=452, MaxFileDescriptor=1048576, SystemLoadAverage=144, ProcessCount=11, AvailableMemoryMB=4426 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.log.dir so I do NOT create it in target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b3de7ff1-46ea-deb8-efcc-307d0ca7f394/hadoop.tmp.dir so I do NOT create it in target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5, deleteOnExit=true 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/test.cache.data in system properties and 
HBase conf 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:55:53,925 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:55:53,926 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test 
{}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:55:53,926 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:55:53,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:55:53,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:55:53,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:55:53,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:55:53,927 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:55:53,937 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:55:54,305 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:54,309 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:54,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:54,310 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:54,310 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:55:54,311 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:54,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@8df6f39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:54,312 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@35d68916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:54,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:54,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:55:54,405 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40ce61ea{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-33727-hadoop-hdfs-3_4_1-tests_jar-_-any-17969015713708398221/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:55:54,405 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3734ddc7{HTTP/1.1, (http/1.1)}{localhost:33727} 2024-11-14T09:55:54,406 INFO [Time-limited test {}] server.Server(415): Started @159041ms 2024-11-14T09:55:54,416 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:55:54,659 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:54,663 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:54,664 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:54,664 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:54,664 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:55:54,664 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b3376d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:54,665 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7f6bebc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:54,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@131e974b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-46843-hadoop-hdfs-3_4_1-tests_jar-_-any-11658885242003968420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:54,761 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5aff23e7{HTTP/1.1, (http/1.1)}{localhost:46843} 2024-11-14T09:55:54,762 INFO [Time-limited test {}] server.Server(415): Started @159397ms 2024-11-14T09:55:54,763 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
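The entries above record HBaseTestingUtil bringing up a fresh minicluster (StartMiniClusterOption{numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1}) plus its HDFS namenode/datanode Jetty endpoints for testLogRollOnPipelineRestart. As a minimal sketch of how a test requests such a cluster — assuming the builder/start/shutdown method shapes implied by the logged option string, not the actual TestLogRolling source:

```java
// Sketch of how a test asks HBaseTestingUtil for the cluster whose startup the
// entries above record; option values mirror the logged StartMiniClusterOption.
// Assumed API shapes (builder/start/shutdown), not the TestLogRolling source.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public final class MiniClusterStartupSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)        // one HMaster
        .numRegionServers(1)  // one MiniHBaseClusterRegionServer
        .numDataNodes(2)      // the two DFS datanodes started above
        .numZkServers(1)      // one MiniZooKeeperCluster node
        .build();
    util.startMiniCluster(option); // STARTING DFS, then ZK, master and region server
    try {
      // ... exercise WAL rolling against util.getConnection() ...
    } finally {
      util.shutdownMiniCluster(); // feeds the ResourceChecker before/after report
    }
  }
}
```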
2024-11-14T09:55:54,791 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:55:54,794 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:55:54,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:55:54,795 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:55:54,795 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:55:54,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@21998c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:55:54,796 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@794cb94f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:55:54,891 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@aae1781{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-35347-hadoop-hdfs-3_4_1-tests_jar-_-any-16291442633541193396/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:55:54,892 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4ef0fe27{HTTP/1.1, (http/1.1)}{localhost:35347} 2024-11-14T09:55:54,892 INFO [Time-limited test {}] server.Server(415): Started @159527ms 2024-11-14T09:55:54,893 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:55:55,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:55,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:55,888 WARN [Thread-1219 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data1/current/BP-1086040793-172.17.0.2-1731578153948/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:55,888 WARN [Thread-1220 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data2/current/BP-1086040793-172.17.0.2-1731578153948/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:55,907 WARN [Thread-1183 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:55:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f8e650f27b74de3 with lease ID 0x7c010e6d37478fce: Processing first storage report for DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2 from datanode DatanodeRegistration(127.0.0.1:33763, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=42971, infoSecurePort=0, ipcPort=46135, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948) 2024-11-14T09:55:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f8e650f27b74de3 with lease ID 0x7c010e6d37478fce: from storage DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2 node DatanodeRegistration(127.0.0.1:33763, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=42971, infoSecurePort=0, ipcPort=46135, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x1f8e650f27b74de3 with lease ID 0x7c010e6d37478fce: Processing first storage report for DS-63aa3513-71b6-4183-a315-2fac647fa369 from datanode DatanodeRegistration(127.0.0.1:33763, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=42971, infoSecurePort=0, ipcPort=46135, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948) 2024-11-14T09:55:55,909 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x1f8e650f27b74de3 with lease ID 0x7c010e6d37478fce: from storage DS-63aa3513-71b6-4183-a315-2fac647fa369 node DatanodeRegistration(127.0.0.1:33763, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=42971, infoSecurePort=0, ipcPort=46135, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:56,027 WARN [Thread-1230 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data3/current/BP-1086040793-172.17.0.2-1731578153948/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:56,027 WARN [Thread-1231 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data4/current/BP-1086040793-172.17.0.2-1731578153948/current, will proceed with Du for space computation calculation, 2024-11-14T09:55:56,047 WARN [Thread-1206 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:55:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x306a54776b678686 with lease ID 0x7c010e6d37478fcf: Processing first storage report for DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd from datanode DatanodeRegistration(127.0.0.1:39187, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=36649, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948) 2024-11-14T09:55:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x306a54776b678686 with lease ID 0x7c010e6d37478fcf: from storage DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd node DatanodeRegistration(127.0.0.1:39187, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=36649, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:55:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x306a54776b678686 with lease ID 0x7c010e6d37478fcf: Processing first storage report for DS-868ea760-9830-4351-9815-14d115745793 from datanode DatanodeRegistration(127.0.0.1:39187, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=36649, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948) 2024-11-14T09:55:56,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x306a54776b678686 with lease ID 0x7c010e6d37478fcf: from storage DS-868ea760-9830-4351-9815-14d115745793 node DatanodeRegistration(127.0.0.1:39187, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=36649, infoSecurePort=0, ipcPort=37265, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:55:56,137 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d 2024-11-14T09:55:56,140 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/zookeeper_0, clientPort=54564, secureClientPort=-1, 
dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:55:56,141 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54564 2024-11-14T09:55:56,142 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,143 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:56,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:55:56,156 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902 with version=8 2024-11-14T09:55:56,157 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:55:56,159 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:56,159 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:55:56,160 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using 
org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:56,160 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:38147 2024-11-14T09:55:56,162 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:38147 connecting to ZooKeeper ensemble=127.0.0.1:54564 2024-11-14T09:55:56,247 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:381470x0, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:56,248 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:38147-0x10138c5d0170000 connected 2024-11-14T09:55:56,332 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,337 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:56,337 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902, hbase.cluster.distributed=false 2024-11-14T09:55:56,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:56,340 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:56,340 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=38147 2024-11-14T09:55:56,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=38147 2024-11-14T09:55:56,341 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=38147 2024-11-14T09:55:56,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=38147 2024-11-14T09:55:56,342 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=38147 2024-11-14T09:55:56,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:56,359 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:55:56,360 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:55:56,361 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46249 2024-11-14T09:55:56,362 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:46249 connecting to ZooKeeper ensemble=127.0.0.1:54564 2024-11-14T09:55:56,363 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,364 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:462490x0, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:55:56,374 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:462490x0, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:55:56,374 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:46249-0x10138c5d0170001 connected 2024-11-14T09:55:56,375 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:55:56,375 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:55:56,376 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): 
regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:55:56,377 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:55:56,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46249 2024-11-14T09:55:56,379 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46249 2024-11-14T09:55:56,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46249 2024-11-14T09:55:56,380 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46249 2024-11-14T09:55:56,382 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46249 2024-11-14T09:55:56,398 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:38147 2024-11-14T09:55:56,398 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:56,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:56,406 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:55:56,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,416 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,416 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:55:56,417 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,38147,1731578156159 from backup master directory 2024-11-14T09:55:56,426 DEBUG [Time-limited 
test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:56,426 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:55:56,426 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:55:56,426 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,431 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/hbase.id] with ID: 3b0af3b9-137f-4d16-9c40-e179bc8e72b4 2024-11-14T09:55:56,431 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/.tmp/hbase.id 2024-11-14T09:55:56,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:56,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:55:56,437 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/.tmp/hbase.id]:[hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/hbase.id] 2024-11-14T09:55:56,449 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:56,449 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:55:56,450 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
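The recurring Close-WAL-Writer-0 warnings above ("Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed") come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed reflectively on WAL files that belong to the previous test's already-closed filesystem. A minimal sketch of that failure mode — illustrative only, not the RecoverLeaseFSUtils source; the helper name and error handling are assumptions:

```java
// Illustrative sketch only (not the RecoverLeaseFSUtils source): why the probe
// logs "Failed invocation ... InvocationTargetException" with the real cause
// ("Filesystem closed") attached as the wrapped target exception.
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {
  /** Returns true only when the probe succeeds and reports the file as closed. */
  static boolean isFileClosed(FileSystem fs, Path walPath) {
    try {
      // isFileClosed is looked up reflectively because it exists on
      // DistributedFileSystem, not on the generic FileSystem API.
      Method isFileClosedMeth = fs.getClass().getMethod("isFileClosed", Path.class);
      return (Boolean) isFileClosedMeth.invoke(fs, walPath);
    } catch (InvocationTargetException e) {
      // The reflective call wraps the underlying failure; in the log that is the
      // IOException("Filesystem closed") thrown by DFSClient.checkOpen once the
      // old minicluster's DFS client has been shut down.
      System.err.println("Failed invocation for " + walPath + ": " + e.getTargetException());
    } catch (ReflectiveOperationException | RuntimeException e) {
      System.err.println("Failed invocation for " + walPath + ": " + e);
    }
    return false;
  }
}
```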
2024-11-14T09:55:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,458 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:56,464 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:55:56,465 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:55:56,465 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:55:56,466 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:56,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:55:56,474 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store 2024-11-14T09:55:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:56,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:55:56,482 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:55:56,482 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
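The master:store descriptor logged above (info/proc/rs/state families; ROW_INDEX_V1 encoding, ROWCOL bloom filter and 8 KB block size on info) corresponds to settings expressible through the public client-side descriptor builders. A hedged sketch covering the 'info' family only, using assumed-but-standard builder calls rather than the MasterRegion code itself:

```java
// Sketch: expressing the logged 'info' column family settings with the public
// descriptor builders (assumed API usage, not how MasterRegion builds it).
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                    // VERSIONS => '3'
            .setInMemory(true)                                    // IN_MEMORY => 'true'
            .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
            .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
            .build())
        .build();
  }
}
```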
2024-11-14T09:55:56,482 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578156482Disabling compacts and flushes for region at 1731578156482Disabling writes for close at 1731578156482Writing region close event to WAL at 1731578156482Closed at 1731578156482 2024-11-14T09:55:56,483 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/.initializing 2024-11-14T09:55:56,483 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C38147%2C1731578156159, suffix=, logDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159, archiveDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/oldWALs, maxLogs=10 2024-11-14T09:55:56,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C38147%2C1731578156159.1731578156486 2024-11-14T09:55:56,491 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 2024-11-14T09:55:56,492 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36649:36649),(127.0.0.1/127.0.0.1:42971:42971)] 2024-11-14T09:55:56,497 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:56,497 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:56,497 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,497 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,500 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,502 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:55:56,502 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:56,503 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,504 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:55:56,504 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:56,505 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:55:56,507 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,507 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:56,508 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,509 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:55:56,509 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,510 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:56,510 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,510 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,511 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,512 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,512 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,513 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:55:56,514 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:55:56,516 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:56,516 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=764456, jitterRate=-0.027944311499595642}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:55:56,517 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578156497Initializing all the Stores at 1731578156498 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578156498Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578156500 (+2 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578156500Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578156500Cleaning up temporary data from old regions at 1731578156512 (+12 ms)Region opened successfully at 1731578156517 (+5 ms) 2024-11-14T09:55:56,517 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:55:56,520 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7e860d69, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:56,521 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:55:56,521 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:55:56,521 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:55:56,522 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:55:56,522 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:55:56,522 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:55:56,522 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:55:56,524 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:55:56,525 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:55:56,531 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:55:56,532 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:55:56,533 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:55:56,542 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:55:56,543 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:55:56,544 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:55:56,553 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:55:56,554 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:55:56,563 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:55:56,565 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:55:56,574 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:55:56,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:56,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:55:56,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,584 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,585 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,38147,1731578156159, sessionid=0x10138c5d0170000, setting cluster-up flag (Was=false) 2024-11-14T09:55:56,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,605 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,637 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:55:56,640 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,658 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:56,690 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:55:56,692 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,38147,1731578156159 2024-11-14T09:55:56,694 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:55:56,696 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:56,697 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:55:56,697 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:55:56,697 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,38147,1731578156159 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:55:56,700 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:56,700 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:56,700 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:56,700 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:55:56,701 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:55:56,701 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,701 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:56,701 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:55:56,702 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578186702 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:55:56,703 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,704 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:56,704 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:55:56,704 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:55:56,704 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:55:56,704 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:55:56,705 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:55:56,705 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:55:56,705 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578156705,5,FailOnTimeoutGroup] 2024-11-14T09:55:56,705 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578156705,5,FailOnTimeoutGroup] 2024-11-14T09:55:56,705 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,706 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:55:56,706 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,706 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,706 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,706 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:55:56,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:56,713 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:55:56,714 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:55:56,714 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902 2024-11-14T09:55:56,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:56,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:55:56,722 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:56,723 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:56,725 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:56,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:56,725 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:56,726 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:56,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:56,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:56,728 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:56,728 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:56,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:56,730 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:56,730 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:56,731 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:56,731 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:56,731 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740 2024-11-14T09:55:56,732 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740 2024-11-14T09:55:56,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:56,733 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:56,734 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
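(Editorial sketch, not part of the log: the FlushLargeStoresPolicy lines — "32.0 M" earlier for master:store and "16.0 M" here for hbase:meta — report the fallback the log message itself describes, i.e. region memstore flush size divided by the number of column families. The arithmetic below reproduces both figures; the 64 MB flush size for hbase:meta is inferred from the 16 MB lower bound logged just below, not stated directly.)

    public class FlushLowerBoundSketch {
      public static void main(String[] args) {
        // Fallback when hbase.hregion.percolumnfamilyflush.size.lower.bound is not set:
        // lower bound = region memstore flush size / number of column families.
        long masterStoreFlushSize = 134_217_728L; // 128 MB, from the MasterRegion flush config above
        long metaFlushSize = 67_108_864L;         // 64 MB, inferred from flushSizeLowerBound=16777216 for 1588230740
        int families = 4;                         // master:store: info, proc, rs, state; hbase:meta: info, ns, rep_barrier, table
        System.out.println(masterStoreFlushSize / families); // 33554432 -> the "32.0 M" logged for master:store
        System.out.println(metaFlushSize / families);        // 16777216 -> the "16.0 M" logged for hbase:meta
      }
    }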
2024-11-14T09:55:56,735 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:56,737 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:56,738 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=742992, jitterRate=-0.05523727834224701}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578156722Initializing all the Stores at 1731578156723 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578156723Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578156723Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578156723Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578156723Cleaning up temporary data from old regions at 1731578156733 (+10 ms)Region opened successfully at 1731578156739 (+6 ms) 2024-11-14T09:55:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:55:56,739 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:55:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:55:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:55:56,739 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:55:56,740 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:55:56,740 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578156739Disabling compacts and flushes for region at 1731578156739Disabling writes for close at 1731578156739Writing region 
close event to WAL at 1731578156740 (+1 ms)Closed at 1731578156740 2024-11-14T09:55:56,742 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:56,742 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:55:56,742 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:55:56,743 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:56,745 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:55:56,786 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(746): ClusterId : 3b0af3b9-137f-4d16-9c40-e179bc8e72b4 2024-11-14T09:55:56,787 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:55:56,797 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:55:56,797 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:55:56,808 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:55:56,808 DEBUG [RS:0;defc576eb6b7:46249 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4120ff8e, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:55:56,824 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:46249 2024-11-14T09:55:56,824 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:55:56,824 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:55:56,824 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:55:56,825 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,38147,1731578156159 with port=46249, startcode=1731578156359 2024-11-14T09:55:56,825 DEBUG [RS:0;defc576eb6b7:46249 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:55:56,827 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:55:56,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38147 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,827 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=38147 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,829 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902 2024-11-14T09:55:56,829 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41049 2024-11-14T09:55:56,829 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:55:56,837 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:55:56,838 DEBUG [RS:0;defc576eb6b7:46249 {}] zookeeper.ZKUtil(111): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,838 WARN [RS:0;defc576eb6b7:46249 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:55:56,838 INFO [RS:0;defc576eb6b7:46249 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:56,838 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,46249,1731578156359] 2024-11-14T09:55:56,838 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,841 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:55:56,844 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:55:56,844 INFO [RS:0;defc576eb6b7:46249 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:55:56,844 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
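(Editorial sketch, not part of the log: the registration and watcher events above all go through the ZooKeeper quorum at 127.0.0.1:54564. A client outside this test would reach the same cluster roughly as below, using the standard client API; the quorum address is simply the one visible in this run.)

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientConnectionSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");
        conf.set("hbase.zookeeper.property.clientPort", "54564"); // quorum port seen in these log lines
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // List the tables the master knows about once startup has finished.
          admin.listTableNames();
        }
      }
    }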
2024-11-14T09:55:56,848 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:55:56,849 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:55:56,849 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,849 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,849 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,849 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:56,850 DEBUG [RS:0;defc576eb6b7:46249 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,851 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46249,1731578156359-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:56,869 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:55:56,870 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46249,1731578156359-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,870 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,870 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.Replication(171): defc576eb6b7,46249,1731578156359 started 2024-11-14T09:55:56,882 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:56,883 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,46249,1731578156359, RpcServer on defc576eb6b7/172.17.0.2:46249, sessionid=0x10138c5d0170001 2024-11-14T09:55:56,883 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:55:56,883 DEBUG [RS:0;defc576eb6b7:46249 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,883 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,46249,1731578156359' 2024-11-14T09:55:56,883 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:55:56,883 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:55:56,884 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:55:56,884 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:55:56,884 DEBUG [RS:0;defc576eb6b7:46249 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,46249,1731578156359 2024-11-14T09:55:56,884 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,46249,1731578156359' 2024-11-14T09:55:56,884 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:55:56,884 DEBUG 
[RS:0;defc576eb6b7:46249 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:55:56,885 DEBUG [RS:0;defc576eb6b7:46249 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:55:56,885 INFO [RS:0;defc576eb6b7:46249 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:55:56,885 INFO [RS:0;defc576eb6b7:46249 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:55:56,895 WARN [defc576eb6b7:38147 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:55:56,989 INFO [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C46249%2C1731578156359, suffix=, logDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359, archiveDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs, maxLogs=32 2024-11-14T09:55:56,991 INFO [RS:0;defc576eb6b7:46249 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:55:56,999 INFO [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:55:57,000 DEBUG [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36649:36649),(127.0.0.1/127.0.0.1:42971:42971)] 2024-11-14T09:55:57,145 DEBUG [defc576eb6b7:38147 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:55:57,146 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:57,150 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,46249,1731578156359, state=OPENING 2024-11-14T09:55:57,206 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:55:57,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:57,216 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:55:57,218 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:55:57,219 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,46249,1731578156359}] 2024-11-14T09:55:57,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:57,219 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:57,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:57,351 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:57,375 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:55:57,380 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34109, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:55:57,386 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:55:57,387 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:55:57,389 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C46249%2C1731578156359.meta, suffix=.meta, logDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359, archiveDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs, maxLogs=32 2024-11-14T09:55:57,389 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta 2024-11-14T09:55:57,398 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta 2024-11-14T09:55:57,404 DEBUG 
[RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42971:42971),(127.0.0.1/127.0.0.1:36649:36649)] 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:55:57,407 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:55:57,407 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:55:57,409 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:55:57,410 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:55:57,410 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:57,411 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:55:57,412 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:55:57,412 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:57,413 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:55:57,414 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:55:57,414 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:57,414 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:55:57,415 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:55:57,415 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,416 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:55:57,416 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:55:57,417 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740 2024-11-14T09:55:57,418 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740 2024-11-14T09:55:57,420 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:55:57,420 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:55:57,420 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:55:57,422 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:55:57,423 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=846310, jitterRate=0.07614004611968994}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:55:57,423 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:55:57,424 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578157407Writing region info on filesystem at 1731578157408 (+1 ms)Initializing all the Stores at 1731578157408Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578157408Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578157409 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578157409Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578157409Cleaning up temporary data from old regions at 1731578157420 (+11 ms)Running coprocessor post-open hooks at 1731578157423 (+3 ms)Region opened successfully at 1731578157423 2024-11-14T09:55:57,425 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578157375 2024-11-14T09:55:57,428 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:55:57,428 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:55:57,429 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:57,430 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,46249,1731578156359, state=OPEN 2024-11-14T09:55:57,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:57,469 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:55:57,469 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:57,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:57,470 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:55:57,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:55:57,472 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,46249,1731578156359 in 251 msec 2024-11-14T09:55:57,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:55:57,475 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 730 msec 2024-11-14T09:55:57,476 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:55:57,476 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:55:57,477 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:57,478 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,46249,1731578156359, seqNum=-1] 2024-11-14T09:55:57,478 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:57,479 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43351, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:57,486 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 790 msec 2024-11-14T09:55:57,486 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578157486, completionTime=-1 2024-11-14T09:55:57,486 INFO 
[master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:55:57,486 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:55:57,488 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:55:57,488 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578217488 2024-11-14T09:55:57,488 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578277488 2024-11-14T09:55:57,488 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:38147, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,489 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,491 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.068sec 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:55:57,494 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:55:57,497 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:55:57,497 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:55:57,497 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,38147,1731578156159-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:55:57,564 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:55:57,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,584 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,585 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,586 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@64ad885f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:57,586 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,38147,-1 for getting cluster id 2024-11-14T09:55:57,586 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:55:57,588 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '3b0af3b9-137f-4d16-9c40-e179bc8e72b4' 2024-11-14T09:55:57,589 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:55:57,589 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "3b0af3b9-137f-4d16-9c40-e179bc8e72b4" 2024-11-14T09:55:57,589 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@d088991, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:57,589 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,38147,-1] 2024-11-14T09:55:57,589 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:55:57,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,590 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:55:57,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,591 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46214, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:55:57,592 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@55f4ab1a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:55:57,593 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:55:57,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:55:57,594 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,46249,1731578156359, seqNum=-1] 2024-11-14T09:55:57,594 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:55:57,596 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55006, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:55:57,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,38147,1731578156159 2024-11-14T09:55:57,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:55:57,600 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:55:57,600 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-14T09:55:57,600 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-14T09:55:57,601 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:55:57,602 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is defc576eb6b7,38147,1731578156159 2024-11-14T09:55:57,602 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@767dbdb0 2024-11-14T09:55:57,602 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:55:57,603 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46220, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:55:57,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:55:57,604 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-14T09:55:57,604 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:55:57,606 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:55:57,607 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:55:57,608 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,608 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-14T09:55:57,609 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:55:57,609 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:55:57,616 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741835_1011 (size=395) 2024-11-14T09:55:57,617 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741835_1011 (size=395) 2024-11-14T09:55:57,618 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8e816a25b54b761576f1600c4a642fbd, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902 2024-11-14T09:55:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33763 is added to blk_1073741836_1012 (size=78) 2024-11-14T09:55:57,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39187 is added to blk_1073741836_1012 (size=78) 2024-11-14T09:55:57,631 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:57,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 8e816a25b54b761576f1600c4a642fbd, disabling compactions & flushes 2024-11-14T09:55:57,631 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. after waiting 0 ms 2024-11-14T09:55:57,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,631 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,631 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8e816a25b54b761576f1600c4a642fbd: Waiting for close lock at 1731578157631Disabling compacts and flushes for region at 1731578157631Disabling writes for close at 1731578157631Writing region close event to WAL at 1731578157631Closed at 1731578157631 2024-11-14T09:55:57,633 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:55:57,633 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1731578157633"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578157633"}]},"ts":"1731578157633"} 2024-11-14T09:55:57,638 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-14T09:55:57,640 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:55:57,640 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578157640"}]},"ts":"1731578157640"} 2024-11-14T09:55:57,643 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-14T09:55:57,643 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=8e816a25b54b761576f1600c4a642fbd, ASSIGN}] 2024-11-14T09:55:57,645 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=8e816a25b54b761576f1600c4a642fbd, ASSIGN 2024-11-14T09:55:57,646 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=8e816a25b54b761576f1600c4a642fbd, ASSIGN; state=OFFLINE, location=defc576eb6b7,46249,1731578156359; forceNewPlan=false, retain=false 2024-11-14T09:55:57,797 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e816a25b54b761576f1600c4a642fbd, regionState=OPENING, regionLocation=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:57,799 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=8e816a25b54b761576f1600c4a642fbd, ASSIGN because future has completed 2024-11-14T09:55:57,800 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e816a25b54b761576f1600c4a642fbd, server=defc576eb6b7,46249,1731578156359}] 2024-11-14T09:55:57,957 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 
2024-11-14T09:55:57,957 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8e816a25b54b761576f1600c4a642fbd, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:55:57,958 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,958 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:55:57,958 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,958 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,960 INFO [StoreOpener-8e816a25b54b761576f1600c4a642fbd-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,962 INFO [StoreOpener-8e816a25b54b761576f1600c4a642fbd-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8e816a25b54b761576f1600c4a642fbd columnFamilyName info 2024-11-14T09:55:57,962 DEBUG [StoreOpener-8e816a25b54b761576f1600c4a642fbd-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:55:57,962 INFO [StoreOpener-8e816a25b54b761576f1600c4a642fbd-1 {}] regionserver.HStore(327): Store=8e816a25b54b761576f1600c4a642fbd/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:55:57,962 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,963 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,963 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,964 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,964 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,966 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,968 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:55:57,968 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8e816a25b54b761576f1600c4a642fbd; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=812029, jitterRate=0.03254951536655426}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:55:57,968 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:55:57,969 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8e816a25b54b761576f1600c4a642fbd: Running coprocessor pre-open hook at 1731578157958Writing region info on filesystem at 1731578157958Initializing all the Stores at 1731578157959 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578157959Cleaning up temporary data from old regions at 1731578157964 (+5 ms)Running coprocessor post-open hooks at 1731578157968 (+4 ms)Region opened successfully at 1731578157968 2024-11-14T09:55:57,970 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd., pid=6, masterSystemTime=1731578157953 2024-11-14T09:55:57,972 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,972 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:55:57,973 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8e816a25b54b761576f1600c4a642fbd, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,46249,1731578156359 2024-11-14T09:55:57,975 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8e816a25b54b761576f1600c4a642fbd, server=defc576eb6b7,46249,1731578156359 because future has completed 2024-11-14T09:55:57,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:55:57,979 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8e816a25b54b761576f1600c4a642fbd, server=defc576eb6b7,46249,1731578156359 in 176 msec 2024-11-14T09:55:57,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:55:57,982 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=8e816a25b54b761576f1600c4a642fbd, ASSIGN in 336 msec 2024-11-14T09:55:57,983 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:55:57,983 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578157983"}]},"ts":"1731578157983"} 2024-11-14T09:55:57,985 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-14T09:55:57,986 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:55:57,988 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 382 msec 2024-11-14T09:55:58,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:58,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:59,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:55:59,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:00,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:00,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:01,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:01,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:02,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:56:02,058 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-14T09:56:02,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:56:02,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-14T09:56:02,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:56:02,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-14T09:56:02,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:56:02,060 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-14T09:56:02,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:02,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:02,913 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:56:02,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,939 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,945 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:02,953 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:56:02,954 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-14T09:56:03,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:03,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:04,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:04,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:05,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:05,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:06,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:06,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:07,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:07,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T09:56:07,644 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=38147 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-14T09:56:07,645 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed
2024-11-14T09:56:07,645 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100
2024-11-14T09:56:07,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart
2024-11-14T09:56:07,653 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd.
2024-11-14T09:56:07,658 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd., hostname=defc576eb6b7,46249,1731578156359, seqNum=2]
2024-11-14T09:56:07,763 INFO [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(1368): Slow sync cost: 102 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]]
2024-11-14T09:56:08,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T09:56:08,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:09,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:09,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:09,764 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:09,766 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,766 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,766 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,767 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK], DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]) is bad. 
2024-11-14T09:56:09,767 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]) is bad. 2024-11-14T09:56:09,767 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK], DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39187,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]) is bad. 2024-11-14T09:56:09,768 WARN [PacketResponder: BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39187] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:56:09,768 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-582465400_22 at /127.0.0.1:55906 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:39187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55906 dst: /127.0.0.1:39187 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:09,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:55942 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:39187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55942 dst: /127.0.0.1:39187 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:09,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-582465400_22 at /127.0.0.1:56688 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56688 dst: /127.0.0.1:33763 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:56:09,769 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:56718 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56718 dst: /127.0.0.1:33763 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:09,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:56710 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56710 dst: /127.0.0.1:33763 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:56:09,770 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:55948 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:39187:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55948 dst: /127.0.0.1:39187 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:56:09,818 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@aae1781{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:09,819 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4ef0fe27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:09,820 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:09,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@794cb94f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:09,820 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@21998c84{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:09,822 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:56:09,822 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 2291a4fc-d379-4557-83b1-a9f5772690f1) service to localhost/127.0.0.1:41049 2024-11-14T09:56:09,823 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:56:09,823 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:56:09,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data3/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:09,824 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data4/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:09,824 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:56:09,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:09,839 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:09,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:09,840 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:09,840 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:56:09,840 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@b4c4f18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:09,841 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6e37a2ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:09,932 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f49120b{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-34075-hadoop-hdfs-3_4_1-tests_jar-_-any-9574339277384986087/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:09,932 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f9f08a5{HTTP/1.1, (http/1.1)}{localhost:34075} 2024-11-14T09:56:09,933 INFO [Time-limited test {}] server.Server(415): Started @174568ms 2024-11-14T09:56:09,934 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:09,949 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,949 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,949 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:09,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-582465400_22 at /127.0.0.1:37626 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37626 dst: /127.0.0.1:33763 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-14T09:56:09,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:37612 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37612 dst: /127.0.0.1:33763 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:09,949 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:37606 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:33763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37606 dst: /127.0.0.1:33763 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:09,952 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@131e974b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:09,952 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5aff23e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:09,953 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:09,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7f6bebc0{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:09,953 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b3376d9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:09,954 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:56:09,954 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:56:09,954 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:56:09,954 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 9439d78e-b7e1-4820-9c57-68c70e333371) service to localhost/127.0.0.1:41049 2024-11-14T09:56:09,954 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data1/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:09,955 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data2/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:09,955 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:56:09,964 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:09,966 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:09,971 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:09,971 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:09,971 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:56:09,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54cda05a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:09,972 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3393bacb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:10,065 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5f19ade1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-45791-hadoop-hdfs-3_4_1-tests_jar-_-any-7835966811078322856/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:10,065 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5fb420d9{HTTP/1.1, 
(http/1.1)}{localhost:45791} 2024-11-14T09:56:10,065 INFO [Time-limited test {}] server.Server(415): Started @174700ms 2024-11-14T09:56:10,066 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:10,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:10,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:10,600 WARN [Thread-1355 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:56:10,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd65fd4539169f518 with lease ID 0x7c010e6d37478fd0: from storage DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd node DatanodeRegistration(127.0.0.1:42271, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=40461, infoSecurePort=0, ipcPort=38971, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:10,602 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd65fd4539169f518 with lease ID 0x7c010e6d37478fd0: from storage DS-868ea760-9830-4351-9815-14d115745793 node DatanodeRegistration(127.0.0.1:42271, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=40461, infoSecurePort=0, ipcPort=38971, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:10,909 WARN [Thread-1375 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:56:10,911 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5c6e51ed9f8e6f4 with lease ID 0x7c010e6d37478fd1: from storage DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2 node DatanodeRegistration(127.0.0.1:42559, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=45011, infoSecurePort=0, ipcPort=35005, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:10,911 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xb5c6e51ed9f8e6f4 with lease ID 0x7c010e6d37478fd1: from storage DS-63aa3513-71b6-4183-a315-2fac647fa369 node DatanodeRegistration(127.0.0.1:42559, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=45011, infoSecurePort=0, ipcPort=35005, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:11,082 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-14T09:56:11,086 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-14T09:56:11,089 ERROR [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:11,089 WARN [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:11,089 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C46249%2C1731578156359:(num 1731578156990) roll requested 2024-11-14T09:56:11,090 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:11,098 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 newFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:11,098 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:11,098 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:11,098 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:11,098 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:11,098 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:11,099 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:11,100 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:11,100 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:11,100 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:11,101 WARN [IPC Server handler 0 on default port 41049 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-14T09:56:11,101 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 after 1ms 2024-11-14T09:56:11,104 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45011:45011),(127.0.0.1/127.0.0.1:40461:40461)] 2024-11-14T09:56:11,105 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 is not closed yet, will try archiving it next time 2024-11-14T09:56:11,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:11,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:12,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:12,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:13,111 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-14T09:56:13,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:13,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:13,603 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
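The "Validated row row1003" entry above (and the later row1004 check) is the test reading each written row back after a roll or datanode failure to confirm that no WAL edits were lost. A minimal sketch of such a check, with hypothetical table, family and qualifier names rather than the test's own fixtures:

import java.io.IOException;
import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

final class RowValidator {
  // Read the row back and fail loudly if the previously written value is gone.
  static void validateRow(Table table, String row) throws IOException {
    Result result = table.get(new Get(Bytes.toBytes(row)));
    byte[] value = result.getValue(Bytes.toBytes("info"), Bytes.toBytes("q"));
    if (value == null) {
      throw new AssertionError("Row " + row + " missing after WAL roll / datanode restart");
    }
    System.out.println("Validated row " + row);
  }
}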
2024-11-14T09:56:14,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:14,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:15,103 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 after 4003ms 2024-11-14T09:56:15,116 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:15,117 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42559,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK], DatanodeInfoWithStorage[127.0.0.1:42271,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42559,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]) is bad. 2024-11-14T09:56:15,118 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:35946 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42559:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35946 dst: /127.0.0.1:42559 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:15,119 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:55326 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42271:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:55326 dst: /127.0.0.1:42271 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
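The "Error Recovery ... datanode 0 ... is bad" and DataXceiver failures above show the HDFS client tearing down and rebuilding its write pipeline after losing a replica; with only two datanodes in the mini cluster there is no spare node to substitute, which is why later appends abort with "All datanodes ... are bad". Whether the client even tries to replace a failed datanode is governed by the replace-datanode-on-failure client settings; a sketch of relaxing them for a small cluster follows (the values are illustrative assumptions, not taken from this run's configuration):

import org.apache.hadoop.conf.Configuration;

final class PipelineRecoveryConf {
  // Build a client configuration that skips datanode replacement on pipeline failure,
  // the usual choice when the cluster has fewer than three datanodes.
  static Configuration relaxedReplacePolicy() {
    Configuration conf = new Configuration();
    conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
    conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
    return conf;
  }
}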
2024-11-14T09:56:15,173 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5f19ade1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode}
2024-11-14T09:56:15,174 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5fb420d9{HTTP/1.1, (http/1.1)}{localhost:0}
2024-11-14T09:56:15,174 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging
2024-11-14T09:56:15,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3393bacb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED}
2024-11-14T09:56:15,174 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54cda05a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED}
2024-11-14T09:56:15,175 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit.
2024-11-14T09:56:15,175 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted
2024-11-14T09:56:15,175 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup]
2024-11-14T09:56:15,175 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 9439d78e-b7e1-4820-9c57-68c70e333371) service to localhost/127.0.0.1:41049
2024-11-14T09:56:15,176 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data1/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T09:56:15,177 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data2/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted
2024-11-14T09:56:15,177 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func
2024-11-14T09:56:15,185 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets.
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:15,188 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:15,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:15,188 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:15,189 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:56:15,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@27361061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:15,189 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@22699b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:15,285 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bda5c57{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-35923-hadoop-hdfs-3_4_1-tests_jar-_-any-16101995312252281119/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:15,285 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@40b8cc2f{HTTP/1.1, (http/1.1)}{localhost:35923} 2024-11-14T09:56:15,285 INFO [Time-limited test {}] server.Server(415): Started @179920ms 2024-11-14T09:56:15,286 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:15,305 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:15,305 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_292451186_22 at /127.0.0.1:35802 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:42271:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35802 dst: /127.0.0.1:42271 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:15,309 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f49120b{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:15,309 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f9f08a5{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:15,309 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:15,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6e37a2ab{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:15,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@b4c4f18{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:15,310 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:56:15,310 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
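The Jetty context shutdowns and "Ending block pool service" messages in this stretch, followed shortly by new ServerConnector starts, are the test bouncing its mini-cluster datanodes. A sketch of how such a bounce is typically driven, assuming MiniDFSCluster's stopDataNode/restartDataNode helpers (the variable names and datanode index are illustrative, not the test's actual code):

import org.apache.hadoop.hdfs.MiniDFSCluster;

final class DataNodeBounce {
  // Stop one datanode, let the failure path run, then bring it back on the same port.
  static void bounce(MiniDFSCluster dfsCluster) throws Exception {
    MiniDFSCluster.DataNodeProperties dnProps = dfsCluster.stopDataNode(0);
    // ... exercise WAL writes while the datanode is down ...
    dfsCluster.restartDataNode(dnProps, true); // keep the original port
    dfsCluster.waitActive();                   // block until heartbeats/block reports resume
  }
}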
2024-11-14T09:56:15,311 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:56:15,311 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 2291a4fc-d379-4557-83b1-a9f5772690f1) service to localhost/127.0.0.1:41049 2024-11-14T09:56:15,311 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data3/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:15,311 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data4/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:15,311 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:56:15,324 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:15,327 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:15,328 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:15,328 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:15,328 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:56:15,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1146b324{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:15,329 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@511ae001{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:15,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:15,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:15,433 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27703b15{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/java.io.tmpdir/jetty-localhost-41687-hadoop-hdfs-3_4_1-tests_jar-_-any-7304016700250914354/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:15,434 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@51f33716{HTTP/1.1, (http/1.1)}{localhost:41687} 2024-11-14T09:56:15,434 INFO [Time-limited test {}] server.Server(415): Started @180069ms 2024-11-14T09:56:15,435 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:15,834 WARN [Thread-1429 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:56:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52996e03e6091fd5 with lease ID 0x7c010e6d37478fd2: from storage DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2 node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=33017, infoSecurePort=0, ipcPort=34517, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:15,836 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x52996e03e6091fd5 with lease ID 0x7c010e6d37478fd2: from storage DS-63aa3513-71b6-4183-a315-2fac647fa369 node DatanodeRegistration(127.0.0.1:39969, datanodeUuid=9439d78e-b7e1-4820-9c57-68c70e333371, infoPort=33017, infoSecurePort=0, ipcPort=34517, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:16,047 WARN [Thread-1449 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-14T09:56:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe558665fd93ff844 with lease ID 0x7c010e6d37478fd3: from storage DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd node DatanodeRegistration(127.0.0.1:40101, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=33869, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:16,050 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe558665fd93ff844 with lease ID 0x7c010e6d37478fd3: from storage DS-868ea760-9830-4351-9815-14d115745793 node DatanodeRegistration(127.0.0.1:40101, datanodeUuid=2291a4fc-d379-4557-83b1-a9f5772690f1, infoPort=33869, infoSecurePort=0, ipcPort=40493, storageInfo=lv=-57;cid=testClusterID;nsid=789681237;c=1731578153948), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:16,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:16,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:16,460 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-14T09:56:16,467 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-14T09:56:16,469 ERROR [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42271,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
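After the datanodes come back ("Data Nodes restarted", "Validated row row1004"), the next append still goes through the stale pipeline, fails with "All datanodes ... are bad", and triggers the roll below so a fresh writer is opened on the healthy pipeline. A roll can also be forced from a client; a sketch of that path (connection setup and the server name below are illustrative assumptions):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ForceWalRoll {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Region server that owns the WAL to be rolled (host, port, start code).
      ServerName serverName = ServerName.valueOf("defc576eb6b7", 46249, 1731578156359L);
      admin.rollWALWriter(serverName);
    }
  }
}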
2024-11-14T09:56:16,469 WARN [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(2174): append entry failed
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42271,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:56:16,469 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C46249%2C1731578156359:(num 1731578171090) roll requested
2024-11-14T09:56:16,469 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.1731578176469
2024-11-14T09:56:16,474 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 newFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469
2024-11-14T09:56:16,475 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:56:16,475 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:56:16,475 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:56:16,475 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:56:16,475 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:56:16,475 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469
2024-11-14T09:56:16,475 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing...
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42271,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
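Closing the old writer again fails against the dead pipeline, so the "Recover lease on dfs file ..." path below asks the NameNode to take the lease back; the "Failed to recover lease, attempt=0" followed later by "Recovered lease, attempt=1 after ...ms" is the caller polling until block recovery finishes. A minimal sketch of that recover-then-poll loop, assuming direct DistributedFileSystem access (the retry count and sleep are illustrative, not HBase's RecoverLeaseFSUtils itself):

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class WalLeaseRecovery {
  // Ask the NameNode to recover the lease, then poll until the file is closed.
  static void recoverLease(DistributedFileSystem dfs, Path walFile)
      throws IOException, InterruptedException {
    boolean recovered = dfs.recoverLease(walFile);
    for (int attempt = 0; !recovered && attempt < 10; attempt++) {
      Thread.sleep(1000L);                    // give block recovery time to finish
      recovered = dfs.isFileClosed(walFile);  // true once the last block is finalized
    }
    if (!recovered) {
      throw new IOException("Could not recover lease on " + walFile);
    }
  }
}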
2024-11-14T09:56:16,475 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:42271,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:16,475 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:16,476 WARN [IPC Server handler 2 on default port 41049 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-14T09:56:16,476 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33869:33869),(127.0.0.1/127.0.0.1:33017:33017)] 2024-11-14T09:56:16,476 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 is not closed yet, will try archiving it next time 2024-11-14T09:56:16,476 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 after 0ms 2024-11-14T09:56:17,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:17,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:18,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:18,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:18,478 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:18,487 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 newFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:18,487 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:18,487 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:18,488 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:18,488 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:18,488 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:18,488 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:18,490 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33017:33017),(127.0.0.1/127.0.0.1:33869:33869)] 2024-11-14T09:56:18,490 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 is not closed yet, will try archiving it next time 2024-11-14T09:56:18,490 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 is not closed yet, will try archiving it next time 2024-11-14T09:56:18,491 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:18,491 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:18,492 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 after 0ms 2024-11-14T09:56:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741838_1019 (size=1264) 2024-11-14T09:56:18,492 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:18,492 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741838_1019 (size=1264) 2024-11-14T09:56:18,493 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 is not closed yet, will try archiving it next time 2024-11-14T09:56:18,505 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1731578157969/Put/vlen=218/seqid=0] 2024-11-14T09:56:18,505 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1731578167659/Put/vlen=1045/seqid=0] 2024-11-14T09:56:18,505 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578156990 2024-11-14T09:56:18,505 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:18,505 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:18,506 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 after 1ms 2024-11-14T09:56:18,506 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:18,509 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1731578171088/Put/vlen=1045/seqid=0] 2024-11-14T09:56:18,509 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1731578173113/Put/vlen=1045/seqid=0] 
2024-11-14T09:56:18,509 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 2024-11-14T09:56:18,509 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 2024-11-14T09:56:18,509 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 2024-11-14T09:56:18,510 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 after 0ms 2024-11-14T09:56:18,510 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578176469 2024-11-14T09:56:18,513 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1731578176468/Put/vlen=1045/seqid=0] 2024-11-14T09:56:18,513 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:18,513 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:18,513 WARN [IPC Server handler 3 on default port 41049 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-14T09:56:18,514 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 after 1ms 2024-11-14T09:56:19,054 WARN [ResponseProcessor for block BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:19,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-582465400_22 at /127.0.0.1:52012 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:39969:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:52012 dst: /127.0.0.1:39969 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:39969 remote=/127.0.0.1:52012]. Total timeout mills is 60000, 59432 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:19,054 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-582465400_22 at /127.0.0.1:48032 [Receiving block BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:40101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:48032 dst: /127.0.0.1:40101 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-14T09:56:19,054 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 block BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39969,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK], DatanodeInfoWithStorage[127.0.0.1:40101,DS-25986c45-c58a-4e7e-8cf5-f3f219d61dcd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39969,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]) is bad. 2024-11-14T09:56:19,055 WARN [DataStreamer for file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 block BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:19,058 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741839_1022 (size=85) 2024-11-14T09:56:19,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:19,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:20,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:20,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:20,478 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578171090 after 4002ms 2024-11-14T09:56:21,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:21,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:21,837 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-14T09:56:22,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:22,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:22,515 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 after 4002ms 2024-11-14T09:56:22,516 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:22,525 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:22,525 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 8e816a25b54b761576f1600c4a642fbd 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-14T09:56:22,526 ERROR [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:22,526 WARN [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:22,527 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C46249%2C1731578156359:(num 1731578178477) roll requested 2024-11-14T09:56:22,527 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.1731578182527 2024-11-14T09:56:22,533 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 newFile=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578182527 2024-11-14T09:56:22,533 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,533 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,533 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,533 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,533 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,534 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578182527 2024-11-14T09:56:22,534 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:22,534 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33869:33869),(127.0.0.1/127.0.0.1:33017:33017)] 2024-11-14T09:56:22,534 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 is not closed yet, will try archiving it next time 2024-11-14T09:56:22,534 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1086040793-172.17.0.2-1731578153948:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:22,535 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:22,535 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 after 0ms 2024-11-14T09:56:22,536 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.1731578178477 to hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs/defc576eb6b7%2C46249%2C1731578156359.1731578178477 2024-11-14T09:56:22,553 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/.tmp/info/1d3d7259e99e44759bcd5d55b07b7a6d is 1080, key is row1002/info:/1731578167659/Put/seqid=0 2024-11-14T09:56:22,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741841_1024 (size=9270) 2024-11-14T09:56:22,558 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741841_1024 (size=9270) 2024-11-14T09:56:22,558 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/.tmp/info/1d3d7259e99e44759bcd5d55b07b7a6d 2024-11-14T09:56:22,565 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/.tmp/info/1d3d7259e99e44759bcd5d55b07b7a6d as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/info/1d3d7259e99e44759bcd5d55b07b7a6d 2024-11-14T09:56:22,571 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/info/1d3d7259e99e44759bcd5d55b07b7a6d, entries=4, sequenceid=8, filesize=9.1 K 2024-11-14T09:56:22,572 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 8e816a25b54b761576f1600c4a642fbd in 47ms, sequenceid=8, compaction requested=false 2024-11-14T09:56:22,572 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 8e816a25b54b761576f1600c4a642fbd: 2024-11-14T09:56:22,572 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB
2024-11-14T09:56:22,572 ERROR [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:22,572 WARN [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902-prefix:defc576eb6b7,46249,1731578156359.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:22,572 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C46249%2C1731578156359.meta:.meta(num 1731578157389) roll requested 2024-11-14T09:56:22,573 INFO [regionserver/defc576eb6b7:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46249%2C1731578156359.meta.1731578182572.meta 2024-11-14T09:56:22,578 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,578 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,578 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,578 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,578 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,578 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578182572.meta 2024-11-14T09:56:22,578 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:56:22,579 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-14T09:56:22,579 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta
2024-11-14T09:56:22,579 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33869:33869),(127.0.0.1/127.0.0.1:33017:33017)]
2024-11-14T09:56:22,579 DEBUG [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta is not closed yet, will try archiving it next time
2024-11-14T09:56:22,579 WARN [IPC Server handler 4 on default port 41049 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta has not been closed. Lease recovery is in progress. RecoveryId = 1026 for block blk_1073741834_1013
2024-11-14T09:56:22,579 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta after 0ms
2024-11-14T09:56:22,594 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/info/83a68ea8775344e9939c72f807734523 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd./info:regioninfo/1731578157973/Put/seqid=0
2024-11-14T09:56:22,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741843_1027 (size=7125)
2024-11-14T09:56:22,599 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741843_1027 (size=7125)
2024-11-14T09:56:22,599 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/info/83a68ea8775344e9939c72f807734523
2024-11-14T09:56:22,623 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/ns/fee190d7cd28484696bc9359ac04c31e is 43, key is default/ns:d/1731578157480/Put/seqid=0
2024-11-14T09:56:22,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741844_1028 (size=5153)
2024-11-14T09:56:22,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741844_1028 (size=5153)
2024-11-14T09:56:22,628 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/ns/fee190d7cd28484696bc9359ac04c31e
2024-11-14T09:56:22,647 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/table/b09e152ce61c47aea1b620b47b35058b is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1731578157983/Put/seqid=0
2024-11-14T09:56:22,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741845_1029 (size=5438)
2024-11-14T09:56:22,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741845_1029 (size=5438)
2024-11-14T09:56:22,652 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/table/b09e152ce61c47aea1b620b47b35058b
2024-11-14T09:56:22,658 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/info/83a68ea8775344e9939c72f807734523 as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/info/83a68ea8775344e9939c72f807734523
2024-11-14T09:56:22,664 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/info/83a68ea8775344e9939c72f807734523, entries=10, sequenceid=11, filesize=7.0 K
2024-11-14T09:56:22,665 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/ns/fee190d7cd28484696bc9359ac04c31e as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/ns/fee190d7cd28484696bc9359ac04c31e
2024-11-14T09:56:22,671 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/ns/fee190d7cd28484696bc9359ac04c31e, entries=2, sequenceid=11, filesize=5.0 K
2024-11-14T09:56:22,672 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/.tmp/table/b09e152ce61c47aea1b620b47b35058b as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/table/b09e152ce61c47aea1b620b47b35058b
2024-11-14T09:56:22,679 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/table/b09e152ce61c47aea1b620b47b35058b, entries=2, sequenceid=11, filesize=5.3 K
2024-11-14T09:56:22,680 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 108ms, sequenceid=11, compaction requested=false
2024-11-14T09:56:22,680 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740:
2024-11-14T09:56:22,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-14T09:56:22,685 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-14T09:56:22,685 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:56:22,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:22,685 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:22,685 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-14T09:56:22,685 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:56:22,685 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1722148593, stopped=false 2024-11-14T09:56:22,686 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,38147,1731578156159 2024-11-14T09:56:22,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:56:22,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:56:22,751 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:56:22,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:22,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:22,752 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:56:22,752 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:56:22,752 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at 
org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:56:22,752 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:56:22,752 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:22,752 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,46249,1731578156359' ***** 2024-11-14T09:56:22,752 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:56:22,752 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:56:22,752 INFO [RS:0;defc576eb6b7:46249 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:56:22,752 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(3091): Received CLOSE for 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,46249,1731578156359 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:46249. 
2024-11-14T09:56:22,753 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8e816a25b54b761576f1600c4a642fbd, disabling compactions & flushes 2024-11-14T09:56:22,753 DEBUG [RS:0;defc576eb6b7:46249 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:56:22,753 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:56:22,753 DEBUG [RS:0;defc576eb6b7:46249 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:22,753 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:56:22,753 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. after waiting 0 ms 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:56:22,753 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:56:22,753 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:56:22,754 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1325): Online Regions={8e816a25b54b761576f1600c4a642fbd=TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:56:22,754 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:56:22,754 DEBUG [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8e816a25b54b761576f1600c4a642fbd 2024-11-14T09:56:22,754 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:56:22,754 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:56:22,754 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:56:22,754 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:56:22,759 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/default/TestLogRolling-testLogRollOnPipelineRestart/8e816a25b54b761576f1600c4a642fbd/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-14T09:56:22,759 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:56:22,760 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:56:22,760 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 
2024-11-14T09:56:22,760 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:56:22,760 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578182754Running coprocessor pre-close hooks at 1731578182754Disabling compacts and flushes for region at 1731578182754Disabling writes for close at 1731578182754Writing region close event to WAL at 1731578182755 (+1 ms)Running coprocessor post-close hooks at 1731578182760 (+5 ms)Closed at 1731578182760 2024-11-14T09:56:22,760 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8e816a25b54b761576f1600c4a642fbd: Waiting for close lock at 1731578182753Running coprocessor pre-close hooks at 1731578182753Disabling compacts and flushes for region at 1731578182753Disabling writes for close at 1731578182753Writing region close event to WAL at 1731578182754 (+1 ms)Running coprocessor post-close hooks at 1731578182760 (+6 ms)Closed at 1731578182760 2024-11-14T09:56:22,760 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:56:22,760 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1731578157604.8e816a25b54b761576f1600c4a642fbd. 2024-11-14T09:56:22,853 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:56:22,935 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-14T09:56:22,936 INFO [regionserver/defc576eb6b7:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-14T09:56:22,954 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,46249,1731578156359; all regions closed. 2024-11-14T09:56:22,955 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,956 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:22,960 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741842_1025 (size=825) 2024-11-14T09:56:22,961 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741842_1025 (size=825) 2024-11-14T09:56:23,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:23,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:24,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:24,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:25,050 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1013: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-14T09:56:25,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:25,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:26,137 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:56:26,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:26,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:26,580 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta after 4001ms 2024-11-14T09:56:26,581 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/WALs/defc576eb6b7,46249,1731578156359/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta to hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs/defc576eb6b7%2C46249%2C1731578156359.meta.1731578157389.meta 2024-11-14T09:56:26,584 DEBUG [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs 2024-11-14T09:56:26,584 INFO [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C46249%2C1731578156359.meta:.meta(num 1731578182572) 2024-11-14T09:56:26,584 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,584 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,585 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,585 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,585 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741840_1023 (size=1162) 2024-11-14T09:56:26,587 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741840_1023 (size=1162) 2024-11-14T09:56:26,592 DEBUG [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs 2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C46249%2C1731578156359:(num 1731578182527) 2024-11-14T09:56:26,592 DEBUG [RS:0;defc576eb6b7:46249 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:56:26,592 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:56:26,592 INFO [RS:0;defc576eb6b7:46249 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46249 2024-11-14T09:56:26,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,46249,1731578156359 2024-11-14T09:56:26,630 INFO [RS:0;defc576eb6b7:46249 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:56:26,630 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:56:26,646 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,46249,1731578156359] 2024-11-14T09:56:26,656 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,46249,1731578156359 already deleted, retry=false 2024-11-14T09:56:26,656 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,46249,1731578156359 expired; onlineServers=0 2024-11-14T09:56:26,656 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,38147,1731578156159' ***** 2024-11-14T09:56:26,656 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:56:26,656 INFO [M:0;defc576eb6b7:38147 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:56:26,656 INFO [M:0;defc576eb6b7:38147 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:56:26,657 DEBUG [M:0;defc576eb6b7:38147 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:56:26,657 DEBUG [M:0;defc576eb6b7:38147 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:56:26,657 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:56:26,657 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578156705 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578156705,5,FailOnTimeoutGroup] 2024-11-14T09:56:26,657 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578156705 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578156705,5,FailOnTimeoutGroup] 2024-11-14T09:56:26,657 INFO [M:0;defc576eb6b7:38147 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:56:26,657 INFO [M:0;defc576eb6b7:38147 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:56:26,657 DEBUG [M:0;defc576eb6b7:38147 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:56:26,657 INFO [M:0;defc576eb6b7:38147 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:56:26,657 INFO [M:0;defc576eb6b7:38147 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:56:26,657 INFO [M:0;defc576eb6b7:38147 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:56:26,657 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:56:26,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:56:26,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:26,667 DEBUG [M:0;defc576eb6b7:38147 {}] zookeeper.ZKUtil(347): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:56:26,667 WARN [M:0;defc576eb6b7:38147 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:56:26,668 INFO [M:0;defc576eb6b7:38147 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/.lastflushedseqids 2024-11-14T09:56:26,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741846_1030 (size=111) 2024-11-14T09:56:26,674 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741846_1030 (size=111) 2024-11-14T09:56:26,674 INFO [M:0;defc576eb6b7:38147 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:56:26,674 INFO [M:0;defc576eb6b7:38147 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:56:26,674 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:56:26,674 INFO [M:0;defc576eb6b7:38147 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:26,674 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:26,674 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:56:26,674 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:26,675 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.16 KB heapSize=29.13 KB 2024-11-14T09:56:26,675 ERROR [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData-prefix:defc576eb6b7,38147,1731578156159 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:26,675 WARN [FSHLog-0-hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData-prefix:defc576eb6b7,38147,1731578156159 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-14T09:56:26,675 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog defc576eb6b7%2C38147%2C1731578156159:(num 1731578156486) roll requested 2024-11-14T09:56:26,675 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C38147%2C1731578156159.1731578186675 2024-11-14T09:56:26,681 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,681 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,681 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,681 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,681 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,682 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 with entries=53, filesize=26.61 KB; new WAL /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578186675 2024-11-14T09:56:26,682 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:26,682 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:33763,DS-9c3ce239-66fc-4d8e-9368-5c88f883dec2,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-14T09:56:26,682 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 2024-11-14T09:56:26,683 WARN [IPC Server handler 0 on default port 41049 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1015 2024-11-14T09:56:26,683 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 after 1ms 2024-11-14T09:56:26,684 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33017:33017),(127.0.0.1/127.0.0.1:33869:33869)] 2024-11-14T09:56:26,684 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 is not closed yet, will try archiving it next time 2024-11-14T09:56:26,704 DEBUG [M:0;defc576eb6b7:38147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d0502433b2440db8d70d091fc973065 is 82, key is hbase:meta,,1/info:regioninfo/1731578157429/Put/seqid=0 2024-11-14T09:56:26,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741848_1033 (size=5672) 2024-11-14T09:56:26,709 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741848_1033 (size=5672) 2024-11-14T09:56:26,710 INFO [M:0;defc576eb6b7:38147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d0502433b2440db8d70d091fc973065 2024-11-14T09:56:26,734 DEBUG [M:0;defc576eb6b7:38147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f305ffeb2b364989a04529ebd10bf91a is 777, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731578157987/Put/seqid=0 2024-11-14T09:56:26,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741849_1034 (size=6117) 2024-11-14T09:56:26,740 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741849_1034 (size=6117) 2024-11-14T09:56:26,741 INFO [M:0;defc576eb6b7:38147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.56 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f305ffeb2b364989a04529ebd10bf91a 2024-11-14T09:56:26,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:56:26,746 INFO [RS:0;defc576eb6b7:46249 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:56:26,746 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:46249-0x10138c5d0170001, 
quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:56:26,746 INFO [RS:0;defc576eb6b7:46249 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,46249,1731578156359; zookeeper connection closed. 2024-11-14T09:56:26,746 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@506a674f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@506a674f 2024-11-14T09:56:26,746 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:56:26,769 DEBUG [M:0;defc576eb6b7:38147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/987399ece5df4b5cac600ac2eaecd8a4 is 69, key is defc576eb6b7,46249,1731578156359/rs:state/1731578156828/Put/seqid=0 2024-11-14T09:56:26,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741850_1035 (size=5156) 2024-11-14T09:56:26,786 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741850_1035 (size=5156) 2024-11-14T09:56:26,787 INFO [M:0;defc576eb6b7:38147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/987399ece5df4b5cac600ac2eaecd8a4 2024-11-14T09:56:26,812 DEBUG [M:0;defc576eb6b7:38147 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7e8f062458b4925a318bf2c87fe74b1 is 52, key is load_balancer_on/state:d/1731578157599/Put/seqid=0 2024-11-14T09:56:26,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741851_1036 (size=5056) 2024-11-14T09:56:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741851_1036 (size=5056) 2024-11-14T09:56:26,827 INFO [M:0;defc576eb6b7:38147 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7e8f062458b4925a318bf2c87fe74b1 2024-11-14T09:56:26,840 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5d0502433b2440db8d70d091fc973065 as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5d0502433b2440db8d70d091fc973065 2024-11-14T09:56:26,849 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5d0502433b2440db8d70d091fc973065, entries=8, sequenceid=56, filesize=5.5 K 2024-11-14T09:56:26,851 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f305ffeb2b364989a04529ebd10bf91a as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f305ffeb2b364989a04529ebd10bf91a 2024-11-14T09:56:26,858 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f305ffeb2b364989a04529ebd10bf91a, entries=6, sequenceid=56, filesize=6.0 K 2024-11-14T09:56:26,859 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/987399ece5df4b5cac600ac2eaecd8a4 as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/987399ece5df4b5cac600ac2eaecd8a4 2024-11-14T09:56:26,867 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/987399ece5df4b5cac600ac2eaecd8a4, entries=1, sequenceid=56, filesize=5.0 K 2024-11-14T09:56:26,868 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/d7e8f062458b4925a318bf2c87fe74b1 as hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d7e8f062458b4925a318bf2c87fe74b1 2024-11-14T09:56:26,875 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/d7e8f062458b4925a318bf2c87fe74b1, entries=1, sequenceid=56, filesize=4.9 K 2024-11-14T09:56:26,876 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 202ms, sequenceid=56, compaction requested=false 2024-11-14T09:56:26,878 INFO [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:56:26,878 DEBUG [M:0;defc576eb6b7:38147 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578186674Disabling compacts and flushes for region at 1731578186674Disabling writes for close at 1731578186674Obtaining lock to block concurrent updates at 1731578186675 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578186675Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23714, getHeapSize=29768, getOffHeapSize=0, getCellsCount=67 at 1731578186675Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578186685 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578186685Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578186703 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578186703Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578186717 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578186733 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578186733Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578186747 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578186768 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578186769 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578186795 (+26 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578186812 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578186812Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@ce14382: reopening flushed file at 1731578186839 (+27 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@c2b03b8: reopening flushed file at 1731578186850 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2acbdfdf: reopening flushed file at 1731578186858 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@311023b3: reopening flushed file at 1731578186867 (+9 ms)Finished flush of dataSize ~23.16 KB/23714, heapSize ~29.07 KB/29768, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 202ms, sequenceid=56, compaction requested=false at 1731578186876 (+9 ms)Writing region close event to WAL at 1731578186878 (+2 ms)Closed at 1731578186878 2024-11-14T09:56:26,878 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,878 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,878 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,878 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,879 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:56:26,881 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40101 is added to blk_1073741847_1031 (size=757) 2024-11-14T09:56:26,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39969 is added to blk_1073741847_1031 (size=757) 2024-11-14T09:56:27,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:27,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:27,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,761 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,782 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,783 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,784 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,789 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,790 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:27,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,051 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1015: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-14T09:56:28,302 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:56:28,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,321 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,322 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,323 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,327 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,328 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,330 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:28,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:28,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:29,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:29,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:30,369 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:30,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:30,684 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 after 4002ms 2024-11-14T09:56:30,684 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/WALs/defc576eb6b7,38147,1731578156159/defc576eb6b7%2C38147%2C1731578156159.1731578156486 to hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/oldWALs/defc576eb6b7%2C38147%2C1731578156159.1731578156486 2024-11-14T09:56:30,687 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/MasterData/oldWALs/defc576eb6b7%2C38147%2C1731578156159.1731578156486 to hdfs://localhost:41049/user/jenkins/test-data/5163fbe7-f74d-0884-30b0-828e7638d902/oldWALs/defc576eb6b7%2C38147%2C1731578156159.1731578156486$masterlocalwal$ 2024-11-14T09:56:30,687 INFO [M:0;defc576eb6b7:38147 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:56:30,687 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
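The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" entries above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed(Path) through reflection while the mini-cluster's DFSClient on port 37757 has already been shut down, so every probe throws and the Close-WAL-Writer thread keeps retrying roughly once per second. A minimal sketch of that reflective probe pattern follows; the wrapper class is illustrative, not the HBase source, while FileSystem, Path and the isFileClosed(Path) method are the real Hadoop APIs seen in the stack traces.

    import java.lang.reflect.Method;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    // Illustrative sketch only (not RecoverLeaseFSUtils itself): probe
    // FileSystem#isFileClosed reflectively and treat any failure, including
    // the "Filesystem closed" IOException surfacing as an
    // InvocationTargetException, as "not closed yet" so the caller retries.
    public final class IsFileClosedProbe {
      private IsFileClosedProbe() {}

      public static boolean isFileClosed(FileSystem fs, Path path) {
        try {
          Method m = fs.getClass().getMethod("isFileClosed", Path.class);
          return (Boolean) m.invoke(fs, path);
        } catch (ReflectiveOperationException | RuntimeException e) {
          // NoSuchMethodException: this FileSystem has no isFileClosed.
          // InvocationTargetException: the call itself threw, e.g. the
          // "Filesystem closed" IOException logged above.
          return false;
        }
      }
    }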
2024-11-14T09:56:30,687 INFO [M:0;defc576eb6b7:38147 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:38147 2024-11-14T09:56:30,687 INFO [M:0;defc576eb6b7:38147 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:56:30,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:56:30,798 INFO [M:0;defc576eb6b7:38147 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:56:30,798 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:38147-0x10138c5d0170000, quorum=127.0.0.1:54564, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:56:30,800 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27703b15{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:30,801 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@51f33716{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:30,801 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:30,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@511ae001{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:30,801 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1146b324{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:30,804 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:56:30,804 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
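The "Command processor encountered interrupt and exit" / "Ending command processor service" pair that follows is the usual interrupt-driven teardown of a queue-processing thread: the datanode shutdown interrupts a worker blocked on its command queue, which logs and falls out of its loop. A rough sketch of that pattern, under the assumption of a simple Runnable queue (this class is hypothetical, not the Hadoop BPServiceActor source):

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    // Illustrative sketch of an interrupt-driven command-processing loop.
    public final class CommandProcessorSketch implements Runnable {
      private final BlockingQueue<Runnable> queue = new LinkedBlockingQueue<>();

      public void enqueue(Runnable command) {
        queue.add(command);
      }

      @Override
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            queue.take().run();   // blocks until a command arrives
          }
        } catch (InterruptedException e) {
          // Teardown interrupted the blocking take(): restore the flag and exit.
          Thread.currentThread().interrupt();
          System.err.println("Command processor encountered interrupt and exit.");
        } finally {
          System.err.println("Ending command processor service for: "
              + Thread.currentThread());
        }
      }
    }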
2024-11-14T09:56:30,804 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:56:30,804 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 2291a4fc-d379-4557-83b1-a9f5772690f1) service to localhost/127.0.0.1:41049 2024-11-14T09:56:30,804 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data3/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:30,805 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data4/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:30,805 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:56:30,813 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bda5c57{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:30,813 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@40b8cc2f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:30,813 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:30,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@22699b58{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:30,814 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@27361061{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:30,815 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:56:30,815 WARN [BP-1086040793-172.17.0.2-1731578153948 heartbeating to localhost/127.0.0.1:41049 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1086040793-172.17.0.2-1731578153948 (Datanode Uuid 9439d78e-b7e1-4820-9c57-68c70e333371) service to localhost/127.0.0.1:41049 2024-11-14T09:56:30,816 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data1/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:30,816 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/cluster_57f33e63-508a-f184-e5ff-547142bd98b5/data/data2/current/BP-1086040793-172.17.0.2-1731578153948 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:56:30,816 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:56:30,816 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:56:30,816 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:56:30,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40ce61ea{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:56:30,824 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3734ddc7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:56:30,824 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:56:30,824 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@35d68916{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:56:30,824 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@8df6f39{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir/,STOPPED} 2024-11-14T09:56:30,832 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:56:30,853 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:56:30,864 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:41049 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41049 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41049 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41049 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:41049 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41049 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 452) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=223 (was 144) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3819 (was 4426) 2024-11-14T09:56:30,872 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=223, ProcessCount=11, AvailableMemoryMB=3819 2024-11-14T09:56:30,872 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.log.dir so I do NOT create it in target/test-data/f45af986-a215-914b-f259-153888a32b3e 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/42323c42-5683-58a8-de2f-5178ded1aa2d/hadoop.tmp.dir so I do NOT create it in target/test-data/f45af986-a215-914b-f259-153888a32b3e 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93, deleteOnExit=true 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/test.cache.data in system properties and HBase conf 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:56:30,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:56:30,874 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:56:30,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:56:30,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:56:30,893 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:56:31,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:31,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:31,464 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:31,468 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:31,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:31,482 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:31,482 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:56:31,483 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:31,483 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2941128e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:31,484 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7165585e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:31,601 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5c49ee69{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/java.io.tmpdir/jetty-localhost-33367-hadoop-hdfs-3_4_1-tests_jar-_-any-8494030943182782925/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:56:31,602 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@34381312{HTTP/1.1, (http/1.1)}{localhost:33367} 2024-11-14T09:56:31,602 INFO [Time-limited test {}] server.Server(415): Started @196237ms 2024-11-14T09:56:31,614 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:56:31,871 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:31,875 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:31,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:31,876 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:31,876 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:56:31,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1574660e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:31,877 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1e078168{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:31,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1ab597ae{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/java.io.tmpdir/jetty-localhost-38441-hadoop-hdfs-3_4_1-tests_jar-_-any-12077148685564311042/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:31,987 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6ba04ef4{HTTP/1.1, (http/1.1)}{localhost:38441} 2024-11-14T09:56:31,987 INFO [Time-limited test {}] server.Server(415): Started @196622ms 2024-11-14T09:56:31,988 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:32,025 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:56:32,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:56:32,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:56:32,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:56:32,029 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:56:32,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@435e4405{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:56:32,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1d69a14c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:56:32,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:56:32,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-14T09:56:32,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-14T09:56:32,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-14T09:56:32,133 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4423b898{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/java.io.tmpdir/jetty-localhost-46195-hadoop-hdfs-3_4_1-tests_jar-_-any-2635662939352250461/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:56:32,134 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c7e4fc9{HTTP/1.1, (http/1.1)}{localhost:46195} 2024-11-14T09:56:32,134 INFO [Time-limited test {}] server.Server(415): Started @196769ms 2024-11-14T09:56:32,135 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:56:32,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:32,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:32,966 WARN [Thread-1669 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data1/current/BP-1487274196-172.17.0.2-1731578190907/current, will proceed with Du for space computation calculation, 2024-11-14T09:56:32,966 WARN [Thread-1670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data2/current/BP-1487274196-172.17.0.2-1731578190907/current, will proceed with Du for space computation calculation, 2024-11-14T09:56:32,992 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:56:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ad7f10cc0eb27f1 with lease ID 0xe0959b6c01f15cc1: Processing first storage report for DS-2ce274e8-0d3b-4f8a-a71c-d9e4e7d3f03e from datanode DatanodeRegistration(127.0.0.1:37477, datanodeUuid=7cef2426-2e35-4136-8f2c-c45d68b10bd7, infoPort=37187, infoSecurePort=0, ipcPort=34015, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907) 2024-11-14T09:56:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ad7f10cc0eb27f1 with lease ID 0xe0959b6c01f15cc1: from storage DS-2ce274e8-0d3b-4f8a-a71c-d9e4e7d3f03e node DatanodeRegistration(127.0.0.1:37477, datanodeUuid=7cef2426-2e35-4136-8f2c-c45d68b10bd7, infoPort=37187, infoSecurePort=0, ipcPort=34015, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9ad7f10cc0eb27f1 with lease ID 0xe0959b6c01f15cc1: Processing first storage report for DS-686f90ca-5af6-46eb-a4cb-4e1d0c9814fa from datanode DatanodeRegistration(127.0.0.1:37477, datanodeUuid=7cef2426-2e35-4136-8f2c-c45d68b10bd7, infoPort=37187, infoSecurePort=0, ipcPort=34015, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907) 2024-11-14T09:56:32,994 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9ad7f10cc0eb27f1 with lease ID 0xe0959b6c01f15cc1: from storage DS-686f90ca-5af6-46eb-a4cb-4e1d0c9814fa node DatanodeRegistration(127.0.0.1:37477, datanodeUuid=7cef2426-2e35-4136-8f2c-c45d68b10bd7, infoPort=37187, infoSecurePort=0, ipcPort=34015, 
storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:33,176 WARN [Thread-1681 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data4/current/BP-1487274196-172.17.0.2-1731578190907/current, will proceed with Du for space computation calculation, 2024-11-14T09:56:33,176 WARN [Thread-1680 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data3/current/BP-1487274196-172.17.0.2-1731578190907/current, will proceed with Du for space computation calculation, 2024-11-14T09:56:33,198 WARN [Thread-1656 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:56:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x829bcba9fdc66a7b with lease ID 0xe0959b6c01f15cc2: Processing first storage report for DS-d2162aa3-b640-4aa6-bd12-54019f2065cf from datanode DatanodeRegistration(127.0.0.1:43217, datanodeUuid=10ea4720-0575-4d65-b3e3-aa27bb890a75, infoPort=46151, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907) 2024-11-14T09:56:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x829bcba9fdc66a7b with lease ID 0xe0959b6c01f15cc2: from storage DS-d2162aa3-b640-4aa6-bd12-54019f2065cf node DatanodeRegistration(127.0.0.1:43217, datanodeUuid=10ea4720-0575-4d65-b3e3-aa27bb890a75, infoPort=46151, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:56:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x829bcba9fdc66a7b with lease ID 0xe0959b6c01f15cc2: Processing first storage report for DS-3e3e5a86-fab2-4193-b9e2-2f035babc9d9 from datanode DatanodeRegistration(127.0.0.1:43217, datanodeUuid=10ea4720-0575-4d65-b3e3-aa27bb890a75, infoPort=46151, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907) 2024-11-14T09:56:33,200 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x829bcba9fdc66a7b with lease ID 0xe0959b6c01f15cc2: from storage DS-3e3e5a86-fab2-4193-b9e2-2f035babc9d9 node DatanodeRegistration(127.0.0.1:43217, datanodeUuid=10ea4720-0575-4d65-b3e3-aa27bb890a75, infoPort=46151, infoSecurePort=0, ipcPort=36553, storageInfo=lv=-57;cid=testClusterID;nsid=279823373;c=1731578190907), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-14T09:56:33,263 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e 2024-11-14T09:56:33,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, 
dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/zookeeper_0, clientPort=55385, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:56:33,266 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55385 2024-11-14T09:56:33,267 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,269 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:56:33,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:56:33,287 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff with version=8 2024-11-14T09:56:33,287 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:56:33,289 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, 
hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:56:33,289 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:56:33,290 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44985 2024-11-14T09:56:33,292 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:44985 connecting to ZooKeeper ensemble=127.0.0.1:55385 2024-11-14T09:56:33,348 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:449850x0, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:56:33,349 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:44985-0x10138c6611c0000 connected 2024-11-14T09:56:33,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:33,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:56:33,424 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,426 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,428 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:56:33,429 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff, hbase.cluster.distributed=false 2024-11-14T09:56:33,430 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:56:33,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44985 2024-11-14T09:56:33,442 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44985 2024-11-14T09:56:33,448 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44985 2024-11-14T09:56:33,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44985 2024-11-14T09:56:33,450 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44985 2024-11-14T09:56:33,468 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:56:33,469 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 
2024-11-14T09:56:33,470 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41757 2024-11-14T09:56:33,472 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41757 connecting to ZooKeeper ensemble=127.0.0.1:55385 2024-11-14T09:56:33,473 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,475 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,487 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:417570x0, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:56:33,488 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:417570x0, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:56:33,488 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41757-0x10138c6611c0001 connected 2024-11-14T09:56:33,488 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:56:33,489 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:56:33,490 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:56:33,491 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:56:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41757 2024-11-14T09:56:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41757 2024-11-14T09:56:33,492 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41757 2024-11-14T09:56:33,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41757 2024-11-14T09:56:33,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41757 2024-11-14T09:56:33,509 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:44985 2024-11-14T09:56:33,512 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:56:33,519 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:56:33,519 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:56:33,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,529 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,530 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:56:33,530 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,44985,1731578193289 from backup master directory 2024-11-14T09:56:33,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:56:33,540 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-14T09:56:33,540 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:56:33,540 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,544 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/hbase.id] with ID: 7ed42609-eada-43ce-bf75-91daee5fb426 2024-11-14T09:56:33,544 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/.tmp/hbase.id 2024-11-14T09:56:33,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:56:33,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:56:33,554 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/.tmp/hbase.id]:[hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/hbase.id] 2024-11-14T09:56:33,567 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:33,567 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:56:33,568 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T09:56:33,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,581 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:56:33,592 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:56:33,593 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:56:33,594 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:56:33,594 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:56:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:56:33,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:56:33,603 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store 2024-11-14T09:56:33,613 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:56:33,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:56:33,614 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:33,615 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:56:33,615 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:33,615 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:33,615 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:56:33,615 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:56:33,615 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:56:33,615 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578193614Disabling compacts and flushes for region at 1731578193614Disabling writes for close at 1731578193615 (+1 ms)Writing region close event to WAL at 1731578193615Closed at 1731578193615 2024-11-14T09:56:33,616 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/.initializing 2024-11-14T09:56:33,616 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/WALs/defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C44985%2C1731578193289, suffix=, logDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/WALs/defc576eb6b7,44985,1731578193289, archiveDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/oldWALs, maxLogs=10 2024-11-14T09:56:33,619 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C44985%2C1731578193289.1731578193619 2024-11-14T09:56:33,623 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/WALs/defc576eb6b7,44985,1731578193289/defc576eb6b7%2C44985%2C1731578193289.1731578193619 2024-11-14T09:56:33,624 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46151:46151),(127.0.0.1/127.0.0.1:37187:37187)] 2024-11-14T09:56:33,628 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:56:33,629 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:33,629 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,629 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,630 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:56:33,632 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:33,632 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,634 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:56:33,634 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:56:33,635 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,636 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:56:33,637 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:56:33,637 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:56:33,639 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,639 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:56:33,640 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,640 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,641 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,642 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,642 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,643 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:56:33,644 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:56:33,646 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:56:33,646 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=873525, jitterRate=0.11074474453926086}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:56:33,647 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578193629Initializing all the Stores at 1731578193630 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578193630Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578193630Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578193630Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578193630Cleaning up temporary data from old regions at 1731578193642 (+12 ms)Region opened successfully at 1731578193647 (+5 ms) 2024-11-14T09:56:33,647 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:56:33,651 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@438e4176, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:56:33,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:56:33,651 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:56:33,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:56:33,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:56:33,652 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:56:33,653 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:56:33,653 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:56:33,658 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:56:33,659 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:56:33,666 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:56:33,666 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:56:33,667 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:56:33,676 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:56:33,677 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:56:33,678 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:56:33,687 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:56:33,688 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:56:33,698 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:56:33,700 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:56:33,708 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:56:33,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:56:33,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:56:33,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,719 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,720 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,44985,1731578193289, sessionid=0x10138c6611c0000, setting cluster-up flag (Was=false) 2024-11-14T09:56:33,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,740 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,771 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:56:33,773 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,793 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:33,824 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:56:33,825 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,44985,1731578193289 2024-11-14T09:56:33,827 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:56:33,828 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:56:33,829 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:56:33,829 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:56:33,829 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,44985,1731578193289 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:56:33,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:56:33,832 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578223832 2024-11-14T09:56:33,832 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:56:33,832 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:56:33,832 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:56:33,832 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:56:33,833 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:56:33,833 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:56:33,833 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,833 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:56:33,833 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:56:33,833 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:56:33,834 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:56:33,834 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:56:33,834 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:56:33,834 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:56:33,834 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578193834,5,FailOnTimeoutGroup] 2024-11-14T09:56:33,835 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578193834,5,FailOnTimeoutGroup] 2024-11-14T09:56:33,835 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,835 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:56:33,835 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,835 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,835 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,835 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:56:33,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:56:33,845 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:56:33,846 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:56:33,846 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff 2024-11-14T09:56:33,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:56:33,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:56:33,852 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:33,854 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:56:33,855 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:56:33,855 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:33,855 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:56:33,857 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:56:33,857 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:33,857 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:56:33,858 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:56:33,858 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:33,859 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:56:33,860 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:56:33,860 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:33,861 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:33,861 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:56:33,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740 2024-11-14T09:56:33,862 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740 2024-11-14T09:56:33,864 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:56:33,864 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:56:33,864 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-14T09:56:33,866 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:56:33,868 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:56:33,869 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=704958, jitterRate=-0.10360056161880493}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:56:33,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578193852Initializing all the Stores at 1731578193853 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578193853Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578193853Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578193853Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578193853Cleaning up temporary data from old regions at 1731578193864 (+11 ms)Region opened successfully at 1731578193869 (+5 ms) 2024-11-14T09:56:33,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:56:33,869 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:56:33,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:56:33,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:56:33,869 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:56:33,870 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:56:33,870 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578193869Disabling compacts and flushes for region at 1731578193869Disabling writes for close at 1731578193869Writing region 
close event to WAL at 1731578193870 (+1 ms)Closed at 1731578193870 2024-11-14T09:56:33,871 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:56:33,871 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:56:33,871 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:56:33,873 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:56:33,874 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:56:33,898 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(746): ClusterId : 7ed42609-eada-43ce-bf75-91daee5fb426 2024-11-14T09:56:33,898 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:56:33,909 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:56:33,909 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:56:33,919 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:56:33,920 DEBUG [RS:0;defc576eb6b7:41757 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@9b86745, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:56:33,934 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:41757 2024-11-14T09:56:33,934 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:56:33,934 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:56:33,934 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-14T09:56:33,935 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,44985,1731578193289 with port=41757, startcode=1731578193468 2024-11-14T09:56:33,935 DEBUG [RS:0;defc576eb6b7:41757 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:56:33,937 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58665, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:56:33,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44985 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,41757,1731578193468 2024-11-14T09:56:33,938 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=44985 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:33,939 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff 2024-11-14T09:56:33,939 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:43215 2024-11-14T09:56:33,939 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:56:33,950 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:56:33,951 DEBUG [RS:0;defc576eb6b7:41757 {}] zookeeper.ZKUtil(111): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,41757,1731578193468 2024-11-14T09:56:33,951 WARN [RS:0;defc576eb6b7:41757 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:56:33,951 INFO [RS:0;defc576eb6b7:41757 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:56:33,951 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468 2024-11-14T09:56:33,951 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,41757,1731578193468] 2024-11-14T09:56:33,954 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:56:33,956 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:56:33,956 INFO [RS:0;defc576eb6b7:41757 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:56:33,956 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:56:33,960 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:56:33,961 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:56:33,961 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,961 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,961 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,961 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,961 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,961 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:56:33,962 DEBUG [RS:0;defc576eb6b7:41757 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:56:33,968 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:56:33,968 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,969 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,969 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,969 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,969 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,41757,1731578193468-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:56:33,983 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:56:33,983 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,41757,1731578193468-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,983 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,983 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.Replication(171): defc576eb6b7,41757,1731578193468 started 2024-11-14T09:56:33,998 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:33,998 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,41757,1731578193468, RpcServer on defc576eb6b7/172.17.0.2:41757, sessionid=0x10138c6611c0001 2024-11-14T09:56:33,999 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:56:33,999 DEBUG [RS:0;defc576eb6b7:41757 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,41757,1731578193468 2024-11-14T09:56:33,999 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,41757,1731578193468' 2024-11-14T09:56:33,999 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:56:33,999 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,41757,1731578193468 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,41757,1731578193468' 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:56:34,000 DEBUG 
[RS:0;defc576eb6b7:41757 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:56:34,000 DEBUG [RS:0;defc576eb6b7:41757 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:56:34,001 INFO [RS:0;defc576eb6b7:41757 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:56:34,001 INFO [RS:0;defc576eb6b7:41757 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:56:34,024 WARN [defc576eb6b7:44985 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:56:34,102 INFO [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C41757%2C1731578193468, suffix=, logDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468, archiveDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs, maxLogs=32 2024-11-14T09:56:34,103 INFO [RS:0;defc576eb6b7:41757 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41757%2C1731578193468.1731578194103 2024-11-14T09:56:34,108 INFO [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578194103 2024-11-14T09:56:34,109 DEBUG [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46151:46151),(127.0.0.1/127.0.0.1:37187:37187)] 2024-11-14T09:56:34,275 DEBUG [defc576eb6b7:44985 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:56:34,275 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:34,277 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,41757,1731578193468, state=OPENING 2024-11-14T09:56:34,340 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:56:34,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:34,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:56:34,351 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:56:34,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:56:34,351 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:56:34,351 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,41757,1731578193468}] 2024-11-14T09:56:34,371 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:34,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:34,505 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:56:34,507 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60159, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:56:34,512 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:56:34,512 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:56:34,514 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C41757%2C1731578193468.meta, suffix=.meta, logDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468, archiveDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs, maxLogs=32 2024-11-14T09:56:34,515 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41757%2C1731578193468.meta.1731578194515.meta 2024-11-14T09:56:34,529 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.meta.1731578194515.meta 2024-11-14T09:56:34,532 DEBUG 
[RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46151:46151),(127.0.0.1/127.0.0.1:37187:37187)] 2024-11-14T09:56:34,534 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:56:34,534 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:56:34,534 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:56:34,534 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:56:34,535 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:56:34,535 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:34,535 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:56:34,535 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:56:34,536 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:56:34,537 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:56:34,537 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:34,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:34,538 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:56:34,538 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:56:34,538 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:34,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:34,539 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:56:34,540 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:56:34,540 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:34,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:34,540 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:56:34,541 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:56:34,541 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:34,541 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:56:34,541 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:56:34,542 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740 2024-11-14T09:56:34,543 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740 2024-11-14T09:56:34,544 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:56:34,544 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:56:34,544 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
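The CompactionConfiguration(183) dumps above show the effective compaction tuning applied to each column family of hbase:meta (info, ns, rep_barrier, table): 128 MB minimum compact size, 3-10 files per compaction, ratio 1.2 (5.0 off-peak), a 2684354560-byte throttle point, and a 7-day major-compaction period with 0.5 jitter. As a rough sketch only, assuming these values map to the standard HBase property names shown below (verify against the HBase version in use), the same settings could be made explicit like this:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static void main(String[] args) {
    // Hypothetical sketch: the property names are the standard HBase keys that,
    // to the best of my knowledge, feed the CompactionConfiguration values
    // printed in the log; the values mirror what the log reports.
    Configuration conf = HBaseConfiguration.create();
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024);        // minCompactSize: 128 MB
    conf.setLong("hbase.hstore.compaction.max.size", Long.MAX_VALUE);            // maxCompactSize: 8.00 EB
    conf.setInt("hbase.hstore.compaction.min", 3);                               // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                              // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2F);                        // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0F);                // off-peak ratio
    conf.setLong("hbase.regionserver.thread.compaction.throttle", 2684354560L);  // throttle point
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);                   // major period (7 days, ms)
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5F);                 // major jitter
    System.out.println("minFilesToCompact = " + conf.getInt("hbase.hstore.compaction.min", 3));
  }
}
```

The logged values appear to be the stock defaults, i.e. the test configuration does not seem to override any compaction knobs for the meta region.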
2024-11-14T09:56:34,546 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:56:34,547 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=852935, jitterRate=0.08456304669380188}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:56:34,547 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:56:34,547 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578194535Writing region info on filesystem at 1731578194535Initializing all the Stores at 1731578194536 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578194536Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578194536Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578194536Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578194536Cleaning up temporary data from old regions at 1731578194544 (+8 ms)Running coprocessor post-open hooks at 1731578194547 (+3 ms)Region opened successfully at 1731578194547 2024-11-14T09:56:34,548 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578194504 2024-11-14T09:56:34,551 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:56:34,551 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:56:34,551 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:34,553 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,41757,1731578193468, state=OPEN 2024-11-14T09:56:34,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:56:34,591 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:56:34,591 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:34,592 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:56:34,592 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:56:34,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:56:34,596 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,41757,1731578193468 in 241 msec 2024-11-14T09:56:34,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:56:34,600 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 725 msec 2024-11-14T09:56:34,601 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:56:34,601 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:56:34,603 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:56:34,603 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,41757,1731578193468, seqNum=-1] 2024-11-14T09:56:34,603 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:56:34,605 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44015, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:56:34,611 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 782 msec 2024-11-14T09:56:34,611 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578194611, completionTime=-1 2024-11-14T09:56:34,611 INFO 
[master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:56:34,611 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:56:34,613 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:56:34,613 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578254613 2024-11-14T09:56:34,613 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578314613 2024-11-14T09:56:34,613 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:44985, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,614 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,616 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.078sec 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
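The ChoreService(168) entries above record the master registering its periodic background chores (ClusterStatusChore, BalancerChore, RegionNormalizerChore, CatalogJanitor, HbckChore, FlushedSequenceIdFlusher), each with a name, period, and time unit. A minimal sketch, assuming the public ScheduledChore and ChoreService constructors behave as in recent HBase releases (an assumption, not taken from this log), of how such a chore is defined and scheduled:

```java
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal stopper; in HBase the master/regionserver itself is the Stoppable.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("sketch");
    // period=60000 ms mirrors the ClusterStatusChore entry above; initialDelay=0
    // should make the first run fire right away.
    ScheduledChore chore = new ScheduledChore("SketchChore", stopper, 60000, 0, TimeUnit.MILLISECONDS) {
      @Override protected void chore() {
        System.out.println("chore tick");
      }
    };
    service.scheduleChore(chore);   // registers the chore with the service's scheduler
    Thread.sleep(2000);             // give the first run a chance to execute
    service.shutdown();
  }
}
```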
2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:56:34,618 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:56:34,622 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:56:34,622 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:56:34,622 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,44985,1731578193289-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:56:34,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@56dc617a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:56:34,699 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,44985,-1 for getting cluster id 2024-11-14T09:56:34,699 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:56:34,701 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7ed42609-eada-43ce-bf75-91daee5fb426' 2024-11-14T09:56:34,701 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:56:34,702 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7ed42609-eada-43ce-bf75-91daee5fb426" 2024-11-14T09:56:34,702 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5372ab11, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:56:34,702 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,44985,-1] 2024-11-14T09:56:34,702 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:56:34,702 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:56:34,704 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41156, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:56:34,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@578206ed, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:56:34,706 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:56:34,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,41757,1731578193468, seqNum=-1] 2024-11-14T09:56:34,707 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:56:34,709 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55884, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:56:34,711 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,44985,1731578193289 2024-11-14T09:56:34,711 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:56:34,713 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:56:34,713 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:56:34,714 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is defc576eb6b7,44985,1731578193289 2024-11-14T09:56:34,714 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@307a19aa 2024-11-14T09:56:34,715 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:56:34,716 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41166, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:56:34,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:56:34,716 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
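The two TableDescriptorChecker(321) warnings above fire because the test deliberately creates its table with a tiny MAX_FILESIZE (786432 bytes) and MEMSTORE_FLUSHSIZE (8192 bytes) so that flushes and splits happen quickly during the run. A hedged sketch of the kind of client-side call that produces such a descriptor (the test's actual helper code may differ; connection details are assumed):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CreateTestTableSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      TableDescriptor desc = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"))
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
          // Deliberately tiny limits, matching the values the checker warns about above.
          .setMaxFileSize(786432L)       // MAX_FILESIZE / "hbase.hregion.max.filesize"
          .setMemStoreFlushSize(8192L)   // MEMSTORE_FLUSHSIZE / "hbase.hregion.memstore.flush.size"
          .build();
      admin.createTable(desc);           // triggers the CreateTableProcedure seen in the next entries
    }
  }
}
```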
2024-11-14T09:56:34,717 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:56:34,718 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:56:34,720 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:56:34,720 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:34,720 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-14T09:56:34,721 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:56:34,721 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:56:34,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741835_1011 (size=405) 2024-11-14T09:56:34,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741835_1011 (size=405) 2024-11-14T09:56:34,734 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 5cfcadd4fe90d6ef701c5bb6fcab1b9f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff 2024-11-14T09:56:34,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741836_1012 (size=88) 2024-11-14T09:56:34,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37477 is added to blk_1073741836_1012 (size=88) 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 5cfcadd4fe90d6ef701c5bb6fcab1b9f, disabling compactions & flushes 2024-11-14T09:56:34,740 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. after waiting 0 ms 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:34,740 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:34,740 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f: Waiting for close lock at 1731578194740Disabling compacts and flushes for region at 1731578194740Disabling writes for close at 1731578194740Writing region close event to WAL at 1731578194740Closed at 1731578194740 2024-11-14T09:56:34,741 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:56:34,742 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1731578194741"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578194741"}]},"ts":"1731578194741"} 2024-11-14T09:56:34,744 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
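The MetaTableAccessor(964/832) entries above show the new region's info:regioninfo and info:state columns being put into hbase:meta. To inspect those rows from a client, a plain scan of the meta table's info family is enough; a minimal sketch (connection setup assumed, not part of the test code shown here):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = conn.getTable(TableName.META_TABLE_NAME)) {
      Scan scan = new Scan().addFamily(Bytes.toBytes("info"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result r : scanner) {
          // Each row key is a region name, e.g.
          // TestLogRolling-testCompactionRecordDoesntBlockRolling,,<ts>.<encoded>.
          System.out.println(Bytes.toString(r.getRow()));
        }
      }
    }
  }
}
```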
2024-11-14T09:56:34,745 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:56:34,746 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578194745"}]},"ts":"1731578194745"} 2024-11-14T09:56:34,749 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-14T09:56:34,749 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5cfcadd4fe90d6ef701c5bb6fcab1b9f, ASSIGN}] 2024-11-14T09:56:34,751 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5cfcadd4fe90d6ef701c5bb6fcab1b9f, ASSIGN 2024-11-14T09:56:34,752 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5cfcadd4fe90d6ef701c5bb6fcab1b9f, ASSIGN; state=OFFLINE, location=defc576eb6b7,41757,1731578193468; forceNewPlan=false, retain=false 2024-11-14T09:56:34,902 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5cfcadd4fe90d6ef701c5bb6fcab1b9f, regionState=OPENING, regionLocation=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:34,905 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5cfcadd4fe90d6ef701c5bb6fcab1b9f, ASSIGN because future has completed 2024-11-14T09:56:34,906 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cfcadd4fe90d6ef701c5bb6fcab1b9f, server=defc576eb6b7,41757,1731578193468}] 2024-11-14T09:56:35,064 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 
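Once the ASSIGN TransitRegionStateProcedure and its OpenRegionProcedure child finish, the region's OPEN state and location are visible through the normal client metadata path. A small sketch, using standard Admin and RegionLocator calls, of how a test could confirm the assignment recorded above (names and the reload flag are illustrative, not taken from this log):

```java
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class CheckAssignmentSketch {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator locator = conn.getRegionLocator(table)) {
      // Both calls resolve through hbase:meta, the same rows RegionStateStore updates above.
      System.out.println("available: " + admin.isTableAvailable(table));
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW, true); // reload=true bypasses the cache
      System.out.println("region " + loc.getRegion().getEncodedName() + " on " + loc.getServerName());
    }
  }
}
```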
2024-11-14T09:56:35,064 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 5cfcadd4fe90d6ef701c5bb6fcab1b9f, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:56:35,064 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,064 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:56:35,064 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,065 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,066 INFO [StoreOpener-5cfcadd4fe90d6ef701c5bb6fcab1b9f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,068 INFO [StoreOpener-5cfcadd4fe90d6ef701c5bb6fcab1b9f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 5cfcadd4fe90d6ef701c5bb6fcab1b9f columnFamilyName info 2024-11-14T09:56:35,068 DEBUG [StoreOpener-5cfcadd4fe90d6ef701c5bb6fcab1b9f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:56:35,069 INFO [StoreOpener-5cfcadd4fe90d6ef701c5bb6fcab1b9f-1 {}] regionserver.HStore(327): Store=5cfcadd4fe90d6ef701c5bb6fcab1b9f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:56:35,069 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,070 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,070 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,071 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,071 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,073 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,075 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:56:35,076 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 5cfcadd4fe90d6ef701c5bb6fcab1b9f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=785579, jitterRate=-0.0010858327150344849}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:56:35,076 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:56:35,077 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f: Running coprocessor pre-open hook at 1731578195065Writing region info on filesystem at 1731578195065Initializing all the Stores at 1731578195066 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578195066Cleaning up temporary data from old regions at 1731578195071 (+5 ms)Running coprocessor post-open hooks at 1731578195076 (+5 ms)Region opened successfully at 1731578195077 (+1 ms) 2024-11-14T09:56:35,078 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f., pid=6, masterSystemTime=1731578195058 2024-11-14T09:56:35,081 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open 
deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:35,081 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:56:35,082 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=5cfcadd4fe90d6ef701c5bb6fcab1b9f, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,41757,1731578193468 2024-11-14T09:56:35,084 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 5cfcadd4fe90d6ef701c5bb6fcab1b9f, server=defc576eb6b7,41757,1731578193468 because future has completed 2024-11-14T09:56:35,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:56:35,089 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 5cfcadd4fe90d6ef701c5bb6fcab1b9f, server=defc576eb6b7,41757,1731578193468 in 180 msec 2024-11-14T09:56:35,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:56:35,092 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=5cfcadd4fe90d6ef701c5bb6fcab1b9f, ASSIGN in 340 msec 2024-11-14T09:56:35,093 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:56:35,093 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578195093"}]},"ts":"1731578195093"} 2024-11-14T09:56:35,095 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-14T09:56:35,096 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:56:35,099 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 379 msec 2024-11-14T09:56:35,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:35,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:36,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:36,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:37,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:37,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:37,561 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:56:37,562 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,563 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,564 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,581 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,582 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,583 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,586 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,587 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:37,590 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:56:38,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:38,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:39,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:39,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:39,955 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:56:39,956 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-14T09:56:40,376 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:40,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:41,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:41,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-14T09:56:42,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta
2024-11-14T09:56:42,058 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer
2024-11-14T09:56:42,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-14T09:56:42,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers
2024-11-14T09:56:42,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store
2024-11-14T09:56:42,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer
2024-11-14T09:56:42,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:42,059 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer
2024-11-14T09:56:42,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:42,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:43,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:43,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:44,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:44,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T09:56:44,773 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4
2024-11-14T09:56:44,773 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-14T09:56:44,773 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100
2024-11-14T09:56:44,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:44,778 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
2024-11-14T09:56:44,782 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f., hostname=defc576eb6b7,41757,1731578193468, seqNum=2]
2024-11-14T09:56:44,790 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:44,796 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:44,797 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-14T09:56:44,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-14T09:56:44,799 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-14T09:56:44,801 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-14T09:56:44,963 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41757 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8
2024-11-14T09:56:44,964 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
2024-11-14T09:56:44,964 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-14T09:56:44,983 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/49aac98386f1420c98e30325de5e35f5 is 1080, key is row0001/info:/1731578204783/Put/seqid=0
2024-11-14T09:56:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741837_1013 (size=6033)
2024-11-14T09:56:44,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741837_1013 (size=6033)
2024-11-14T09:56:44,989 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/49aac98386f1420c98e30325de5e35f5
2024-11-14T09:56:44,997 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/49aac98386f1420c98e30325de5e35f5 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5
2024-11-14T09:56:45,002 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5, entries=1, sequenceid=5, filesize=5.9 K
2024-11-14T09:56:45,004 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 39ms, sequenceid=5, compaction requested=false
2024-11-14T09:56:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f:
2024-11-14T09:56:45,004 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
2024-11-14T09:56:45,005 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8
2024-11-14T09:56:45,007 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster(4169): Remote procedure done, pid=8
2024-11-14T09:56:45,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7
2024-11-14T09:56:45,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 208 msec
2024-11-14T09:56:45,015 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 222 msec
2024-11-14T09:56:45,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ...
11 more 2024-11-14T09:56:45,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:46,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-14T09:56:46,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
[Each of the warnings below is accompanied by the identical InvocationTargetException / "Filesystem closed" stack trace shown above; only the log headers are kept.]
2024-11-14T09:56:47,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:47,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:48,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:48,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:49,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:49,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:50,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:50,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:51,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:51,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:52,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:52,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:53,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:53,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:54,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
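For context on the once-per-second warnings above: RecoverLeaseFSUtils asks the NameNode to recover the WAL file's lease and then polls until the file is reported closed, invoking the HDFS methods reflectively. The Java sketch below approximates that retry pattern with the public DistributedFileSystem API; the class name, timeout values, and the WAL path are illustrative placeholders, not taken from the HBase source.

// Minimal sketch, assuming a reachable HDFS at the URI below; not the actual
// RecoverLeaseFSUtils implementation, which wraps these calls in reflection.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static boolean recoverLeaseWithRetry(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pollMs) throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    // Ask the NameNode to start lease recovery; true means the file is already closed.
    if (dfs.recoverLease(wal)) {
      return true;
    }
    // Poll isFileClosed() until the lease is released or the deadline passes.
    while (System.currentTimeMillis() < deadline) {
      if (dfs.isFileClosed(wal)) { // throws IOException("Filesystem closed") if the client was shut down
        return true;
      }
      Thread.sleep(pollMs);        // roughly the 1 s cadence visible in the timestamps above
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical cluster URI and WAL path, mirroring the layout seen in the log.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:37757"), conf);
    Path wal = new Path("/user/jenkins/test-data/example/WALs/host,port,ts/example.wal");
    if (fs instanceof DistributedFileSystem) {
      boolean closed = recoverLeaseWithRetry((DistributedFileSystem) fs, wal, 60_000L, 1_000L);
      System.out.println("lease recovered: " + closed);
    }
  }
}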
2024-11-14T09:56:54,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 after 68067ms
java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at jdk.internal.reflect.GeneratedMethodAccessor202.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-14T09:56:54,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
[same InvocationTargetException / "Filesystem closed" stack trace as at 09:56:46,385]
2024-11-14T09:56:54,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta after 68046ms
java.io.IOException: Filesystem closed
[same stack trace as the attempt=2 warning at 09:56:54,387]
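The root cause in all of these traces is the same: the DFSClient behind the WAL's FileSystem has already been closed, so DFSClient.checkOpen throws java.io.IOException: Filesystem closed on every further call, including both recoverLease and isFileClosed. The small Java sketch below reproduces that behavior directly; the URI and path are placeholders, and closing the FileSystem by hand here stands in for whatever shut the client down in the test (for example the mini-cluster or the FileSystem cache being torn down).

// Hedged illustration only: shows that a closed DistributedFileSystem rejects
// further calls with "Filesystem closed", matching the Caused-by lines above.
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class FilesystemClosedDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(URI.create("hdfs://localhost:37757"), conf);
    dfs.close(); // stand-in for the client being shut down underneath the WAL close path
    try {
      dfs.isFileClosed(new Path("/some/wal")); // same call RecoverLeaseFSUtils makes reflectively
    } catch (IOException e) {
      // Expected: "Filesystem closed" -- the retry loop keeps hitting this until it gives up.
      System.out.println("got: " + e.getMessage());
    }
  }
}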
2024-11-14T09:56:54,842 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7
2024-11-14T09:56:54,843 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-14T09:56:54,847 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:54,848 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling
2024-11-14T09:56:54,849 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9
2024-11-14T09:56:54,850 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE
2024-11-14T09:56:54,851 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS
2024-11-14T09:56:54,851 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}]
2024-11-14T09:56:55,005 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41757 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10
2024-11-14T09:56:55,006 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
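The entries above show the client-driven side of this sequence: a flush request from the test client reaches HMaster, is stored as FlushTableProcedure pid=9, and fans out a FlushRegionProcedure (pid=10) to the region server. A minimal sketch of what such a client call can look like with the public Admin API follows; only the table name is taken from the log, the connection setup is a plain default and is assumed, not shown in the source.

// Hedged sketch of a client-triggered table flush using the public HBase Admin API.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // Blocks until the master-side flush procedure (and its per-region
      // subprocedures) complete, as recorded in the log above.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}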
2024-11-14T09:56:55,006 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB
2024-11-14T09:56:55,013 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/1c503a46a310422b80c03fb747e39aa6 is 1080, key is row0002/info:/1731578214844/Put/seqid=0
2024-11-14T09:56:55,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741838_1014 (size=6033)
2024-11-14T09:56:55,024 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741838_1014 (size=6033)
2024-11-14T09:56:55,025 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/1c503a46a310422b80c03fb747e39aa6
2024-11-14T09:56:55,035 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/1c503a46a310422b80c03fb747e39aa6 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6
2024-11-14T09:56:55,041 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6, entries=1, sequenceid=9, filesize=5.9 K
2024-11-14T09:56:55,042 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 36ms, sequenceid=9, compaction requested=false
2024-11-14T09:56:55,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f:
2024-11-14T09:56:55,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
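The flush above writes the memstore to a temporary HFile under .tmp/info and then commits it into the column family directory (filesize 5.9 K, matching the 6033-byte block reports). If one wanted to confirm the committed store file independently, a plain HDFS listing of that directory would do; the sketch below uses the public FileSystem API, and the path is copied from the log but should be treated as illustrative.

// Hedged sketch: list the region's "info" column-family directory to see the
// committed store file from the flush recorded above.
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ListStoreFilesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:43215"), conf);
    Path infoDir = new Path("/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/"
        + "data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/"
        + "5cfcadd4fe90d6ef701c5bb6fcab1b9f/info");
    for (FileStatus f : fs.listStatus(infoDir)) {
      // Expect one entry of roughly 6 KB (size=6033 in the block reports above).
      System.out.println(f.getPath().getName() + " " + f.getLen() + " bytes");
    }
  }
}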
2024-11-14T09:56:55,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10
2024-11-14T09:56:55,043 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster(4169): Remote procedure done, pid=10
2024-11-14T09:56:55,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9
2024-11-14T09:56:55,047 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 193 msec
2024-11-14T09:56:55,049 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec
[The once-per-second lease-recovery warnings continue, each with the same InvocationTargetException / "Filesystem closed" stack trace as at 09:56:46,385; only the log headers are kept.]
2024-11-14T09:56:55,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:55,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:56,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:56,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:57,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:57,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:58,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:56:58,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:56:59,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:56:59,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:00,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:00,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:01,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:01,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:02,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:02,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:03,263 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:57:03,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:03,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:04,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:04,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
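For context on the warnings above: RecoverLeaseFSUtils keeps probing DistributedFileSystem.isFileClosed (via reflection) for these WAL files after the test's DFS client has already been shut down, so every probe fails with java.io.IOException: Filesystem closed. Below is a minimal Java sketch, purely illustrative and not code from this test, showing how any HDFS call made through an already-closed FileSystem handle surfaces that exception; the URI and path are placeholders, and it only assumes the standard Hadoop FileSystem client API on the classpath.

import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClosedFsDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder namenode URI; the mini-cluster in this log happened to use hdfs://localhost:37757.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:8020"), conf);
    fs.close(); // closes the underlying DFSClient
    try {
      // Any later call hits DFSClient.checkOpen() and fails before an RPC is even attempted.
      fs.exists(new Path("/placeholder/wal/file"));
    } catch (IOException e) {
      System.out.println("Expected: " + e.getMessage()); // prints "Filesystem closed"
    }
  }
}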
11 more 2024-11-14T09:57:04,873 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-14T09:57:04,873 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:57:04,877 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41757%2C1731578193468.1731578224876 2024-11-14T09:57:04,882 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:04,882 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:04,882 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:04,882 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:04,882 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:04,882 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578194103 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578224876 2024-11-14T09:57:04,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46151:46151),(127.0.0.1/127.0.0.1:37187:37187)] 2024-11-14T09:57:04,883 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578194103 is not closed yet, will try archiving it next time 2024-11-14T09:57:04,884 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:57:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741833_1009 (size=5546) 2024-11-14T09:57:04,885 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741833_1009 (size=5546) 2024-11-14T09:57:04,886 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:57:04,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T09:57:04,887 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:57:04,889 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
2024-11-14T09:57:04,889 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:57:05,042 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41757 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-14T09:57:05,042 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:57:05,043 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:57:05,047 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/9a80c97b8205436397c7054f8f4363a5 is 1080, key is row0003/info:/1731578224875/Put/seqid=0 2024-11-14T09:57:05,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741840_1016 (size=6033) 2024-11-14T09:57:05,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741840_1016 (size=6033) 2024-11-14T09:57:05,053 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/9a80c97b8205436397c7054f8f4363a5 2024-11-14T09:57:05,062 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/9a80c97b8205436397c7054f8f4363a5 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5 2024-11-14T09:57:05,069 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5, entries=1, sequenceid=13, filesize=5.9 K 2024-11-14T09:57:05,070 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
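The FLUSH operation and the FlushTableProcedure/FlushRegionProcedure entries around this point correspond to a client-requested flush of TestLogRolling-testCompactionRecordDoesntBlockRolling. As a minimal sketch (not the test's own code), this is how such a flush is typically requested through the standard HBase client Admin API; it assumes an hbase-site.xml on the classpath pointing at a running cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // The master turns this request into a FlushTableProcedure, which fans out
      // FlushRegionProcedure work to the region servers, as recorded in the log around here.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}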
2024-11-14T09:57:05,070 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 26ms, sequenceid=13, compaction requested=true
2024-11-14T09:57:05,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f:
2024-11-14T09:57:05,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.
2024-11-14T09:57:05,070 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12
2024-11-14T09:57:05,070 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster(4169): Remote procedure done, pid=12
2024-11-14T09:57:05,074 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11
2024-11-14T09:57:05,075 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec
2024-11-14T09:57:05,077 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 191 msec
2024-11-14T09:57:05,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:57:05,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:57:06,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:57:06,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:57:07,396 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:57:07,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:57:08,397 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:57:08,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta
2024-11-14T09:57:09,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825
2024-11-14T09:57:09,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:10,398 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:10,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:11,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:11,399 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:12,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:12,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:13,400 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:13,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:14,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:14,401 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:14,982 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-14T09:57:14,983 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-14T09:57:14,983 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:57:14,984 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:57:14,984 DEBUG [Time-limited test {}] regionserver.HStore(1541): 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info is initiating minor compaction (all files) 2024-11-14T09:57:14,985 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:57:14,985 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:14,985 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:57:14,985 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5] into tmpdir=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp, totalSize=17.7 K 2024-11-14T09:57:14,985 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 49aac98386f1420c98e30325de5e35f5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1731578204783 2024-11-14T09:57:14,986 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 1c503a46a310422b80c03fb747e39aa6, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1731578214844 2024-11-14T09:57:14,986 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9a80c97b8205436397c7054f8f4363a5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1731578224875 2024-11-14T09:57:15,000 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 5cfcadd4fe90d6ef701c5bb6fcab1b9f#info#compaction#46 average throughput is unlimited, slept 0 time(s) and 
total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:57:15,001 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/e7f667933bd143f1a01747e3b3b8f4c9 is 1080, key is row0001/info:/1731578204783/Put/seqid=0 2024-11-14T09:57:15,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741841_1017 (size=8296) 2024-11-14T09:57:15,006 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741841_1017 (size=8296) 2024-11-14T09:57:15,013 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/e7f667933bd143f1a01747e3b3b8f4c9 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/e7f667933bd143f1a01747e3b3b8f4c9 2024-11-14T09:57:15,020 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info of 5cfcadd4fe90d6ef701c5bb6fcab1b9f into e7f667933bd143f1a01747e3b3b8f4c9(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:57:15,020 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f: 2024-11-14T09:57:15,023 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41757%2C1731578193468.1731578235022 2024-11-14T09:57:15,028 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:15,029 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:15,029 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:15,029 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:15,029 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:15,029 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578224876 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578235022 2024-11-14T09:57:15,030 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37187:37187),(127.0.0.1/127.0.0.1:46151:46151)] 2024-11-14T09:57:15,030 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578224876 is not closed yet, will try archiving it next time 2024-11-14T09:57:15,030 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578194103 to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs/defc576eb6b7%2C41757%2C1731578193468.1731578194103 2024-11-14T09:57:15,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741839_1015 (size=2520) 2024-11-14T09:57:15,031 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:57:15,031 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741839_1015 (size=2520) 2024-11-14T09:57:15,032 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:57:15,033 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-14T09:57:15,033 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-14T09:57:15,034 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-14T09:57:15,035 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-14T09:57:15,188 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41757 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-14T09:57:15,188 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 
2024-11-14T09:57:15,188 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:57:15,193 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2f0efef7586e417e84aa682b7870a05e is 1080, key is row0000/info:/1731578235021/Put/seqid=0 2024-11-14T09:57:15,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741843_1019 (size=6033) 2024-11-14T09:57:15,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741843_1019 (size=6033) 2024-11-14T09:57:15,198 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2f0efef7586e417e84aa682b7870a05e 2024-11-14T09:57:15,204 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2f0efef7586e417e84aa682b7870a05e as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/2f0efef7586e417e84aa682b7870a05e 2024-11-14T09:57:15,211 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/2f0efef7586e417e84aa682b7870a05e, entries=1, sequenceid=18, filesize=5.9 K 2024-11-14T09:57:15,212 INFO [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 24ms, sequenceid=18, compaction requested=false 2024-11-14T09:57:15,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f: 2024-11-14T09:57:15,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 
2024-11-14T09:57:15,212 DEBUG [RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-14T09:57:15,213 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-14T09:57:15,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-14T09:57:15,217 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 179 msec 2024-11-14T09:57:15,220 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 187 msec 2024-11-14T09:57:15,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:15,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:15,430 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:57:15,430 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-14T09:57:16,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:16,402 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:17,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:17,403 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:18,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:18,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:19,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:19,404 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:20,064 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 5cfcadd4fe90d6ef701c5bb6fcab1b9f, had cached 0 bytes from a total of 14329 2024-11-14T09:57:20,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:20,405 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:21,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:21,406 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:22,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:22,407 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:23,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:23,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:24,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:24,408 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more
2024-11-14T09:57:25,053 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=44985 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13
2024-11-14T09:57:25,054 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed
2024-11-14T09:57:25,058 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C41757%2C1731578193468.1731578245058
2024-11-14T09:57:25,066 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:57:25,066 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:57:25,066 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:57:25,067 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:57:25,067 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-14T09:57:25,067 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578235022 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578245058
2024-11-14T09:57:25,068 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46151:46151),(127.0.0.1/127.0.0.1:37187:37187)]
2024-11-14T09:57:25,068 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578235022 is not closed yet, will try archiving it next time
2024-11-14T09:57:25,068 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/WALs/defc576eb6b7,41757,1731578193468/defc576eb6b7%2C41757%2C1731578193468.1731578224876 to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs/defc576eb6b7%2C41757%2C1731578193468.1731578224876
2024-11-14T09:57:25,068 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-14T09:57:25,069 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
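The repeated WARN entries above come from RecoverLeaseFSUtils probing DistributedFileSystem.isFileClosed through reflection after the underlying DFSClient has already been shut down, so every probe surfaces as an InvocationTargetException whose cause is the "Filesystem closed" IOException. The snippet below is a minimal sketch of that reflective probe pattern only; the class name IsFileClosedProbe and its error handling are illustrative assumptions, not the HBase implementation.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class IsFileClosedProbe {

  private IsFileClosedProbe() {}

  /**
   * Returns true if the filesystem reports the file as closed, false if the probe is
   * unavailable. Unwraps the reflective InvocationTargetException so the underlying
   * IOException (for example "Filesystem closed") is what the caller sees.
   */
  public static boolean isFileClosed(FileSystem fs, Path path) throws IOException {
    Method m;
    try {
      // isFileClosed(Path) exists on DistributedFileSystem; look it up on the concrete class.
      m = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // filesystem implementation without the probe
    }
    try {
      return (Boolean) m.invoke(fs, path);
    } catch (IllegalAccessException e) {
      return false;
    } catch (InvocationTargetException e) {
      Throwable cause = e.getCause();
      if (cause instanceof IOException) {
        throw (IOException) cause; // e.g. "Filesystem closed" from a shut-down DFSClient
      }
      throw new IOException("isFileClosed probe failed", cause);
    }
  }
}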
2024-11-14T09:57:25,069 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:57:25,069 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741842_1018 (size=2026) 2024-11-14T09:57:25,069 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:25,070 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:25,070 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:57:25,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:57:25,070 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=716488272, stopped=false 2024-11-14T09:57:25,070 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,44985,1731578193289 2024-11-14T09:57:25,071 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741842_1018 (size=2026) 2024-11-14T09:57:25,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:57:25,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:57:25,148 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:57:25,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:25,148 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:25,149 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:57:25,149 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
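The ZKWatcher lines above record NodeDeleted events for /hbase/running, the znode whose removal signals cluster shutdown to the master and region servers. The standalone sketch below watches a comparable znode with the plain ZooKeeper client; the quorum address 127.0.0.1:55385 and the znode path are taken from the log purely for illustration, and the class is an assumption, not HBase's ZKWatcher.

import java.util.concurrent.CountDownLatch;

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    final String znode = "/hbase/running";
    final CountDownLatch deleted = new CountDownLatch(1);

    Watcher watcher = new Watcher() {
      @Override
      public void process(WatchedEvent event) {
        // A NodeDeleted event on the znode means shutdown was requested.
        if (event.getType() == Watcher.Event.EventType.NodeDeleted
            && znode.equals(event.getPath())) {
          deleted.countDown();
        }
      }
    };

    // Quorum address taken from the log lines above; adjust for a real deployment.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:55385", 30_000, watcher);
    try {
      zk.exists(znode, watcher); // registers the watch whether or not the node exists yet
      deleted.await();           // blocks until the shutdown marker disappears
      System.out.println(znode + " deleted: cluster shutdown requested");
    } finally {
      zk.close();
    }
  }
}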
2024-11-14T09:57:25,149 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:57:25,149 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:57:25,149 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:25,150 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,41757,1731578193468' ***** 2024-11-14T09:57:25,150 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:57:25,150 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:57:25,150 INFO [RS:0;defc576eb6b7:41757 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:57:25,150 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:57:25,150 INFO [RS:0;defc576eb6b7:41757 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:57:25,150 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(3091): Received CLOSE for 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,41757,1731578193468 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:41757. 2024-11-14T09:57:25,151 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 5cfcadd4fe90d6ef701c5bb6fcab1b9f, disabling compactions & flushes 2024-11-14T09:57:25,151 DEBUG [RS:0;defc576eb6b7:41757 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:57:25,151 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 
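The close sequence logged above (receive CLOSE, take the close lock, disable updates, flush the memstore, then close) is essentially a lock-ordering pattern. The toy class below models only that ordering; it is an assumption-laden simplification, not HRegion, and it omits the WAL, MVCC, and coprocessor hooks the real close journal refers to.

import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ToyRegion {
  private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
  private final ConcurrentSkipListMap<String, byte[]> memstore = new ConcurrentSkipListMap<>();
  private volatile boolean writesDisabled = false;

  public void put(String row, byte[] value) {
    closeLock.readLock().lock();        // many writers share the read side of the close lock
    try {
      if (writesDisabled) {
        throw new IllegalStateException("region is closing");
      }
      memstore.put(row, value);
    } finally {
      closeLock.readLock().unlock();
    }
  }

  public void close() {
    closeLock.writeLock().lock();       // "Acquired close lock ... after waiting 0 ms"
    try {
      writesDisabled = true;            // "Updates disabled for region ..."
      flush();                          // "Flushing 1/1 column families ..."
    } finally {
      closeLock.writeLock().unlock();   // "Closed ..."
    }
  }

  private void flush() {
    // The real server writes an HFile here; the toy model just drops the in-memory data.
    memstore.clear();
  }
}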
2024-11-14T09:57:25,151 DEBUG [RS:0;defc576eb6b7:41757 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:25,151 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:57:25,151 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. after waiting 0 ms 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:57:25,151 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-14T09:57:25,151 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:57:25,152 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-14T09:57:25,152 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-14T09:57:25,152 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1325): Online Regions={5cfcadd4fe90d6ef701c5bb6fcab1b9f=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:57:25,152 DEBUG [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 5cfcadd4fe90d6ef701c5bb6fcab1b9f 2024-11-14T09:57:25,152 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:57:25,152 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:57:25,152 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:57:25,152 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:57:25,152 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:57:25,152 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-14T09:57:25,158 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell 
in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2fc9ec1456bf4268877a52e6899a7a1b is 1080, key is row0001/info:/1731578245056/Put/seqid=0 2024-11-14T09:57:25,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741845_1021 (size=6033) 2024-11-14T09:57:25,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741845_1021 (size=6033) 2024-11-14T09:57:25,163 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2fc9ec1456bf4268877a52e6899a7a1b 2024-11-14T09:57:25,170 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/.tmp/info/2fc9ec1456bf4268877a52e6899a7a1b as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/2fc9ec1456bf4268877a52e6899a7a1b 2024-11-14T09:57:25,173 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/info/e27553b7d126480ebd25f352705b3419 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f./info:regioninfo/1731578195082/Put/seqid=0 2024-11-14T09:57:25,177 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/2fc9ec1456bf4268877a52e6899a7a1b, entries=1, sequenceid=22, filesize=5.9 K 2024-11-14T09:57:25,178 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 27ms, sequenceid=22, compaction requested=true 2024-11-14T09:57:25,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741846_1022 (size=7308) 2024-11-14T09:57:25,189 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741846_1022 (size=7308) 2024-11-14T09:57:25,189 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/info/e27553b7d126480ebd25f352705b3419 2024-11-14T09:57:25,191 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5] to archive 2024-11-14T09:57:25,192 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:57:25,193 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5 to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/49aac98386f1420c98e30325de5e35f5 2024-11-14T09:57:25,194 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6 to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/1c503a46a310422b80c03fb747e39aa6 2024-11-14T09:57:25,196 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5 to hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/info/9a80c97b8205436397c7054f8f4363a5 2024-11-14T09:57:25,196 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=defc576eb6b7:44985 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-14T09:57:25,196 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [49aac98386f1420c98e30325de5e35f5=6033, 1c503a46a310422b80c03fb747e39aa6=6033, 9a80c97b8205436397c7054f8f4363a5=6033] 2024-11-14T09:57:25,200 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/5cfcadd4fe90d6ef701c5bb6fcab1b9f/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-14T09:57:25,200 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 2024-11-14T09:57:25,201 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 5cfcadd4fe90d6ef701c5bb6fcab1b9f: Waiting for close lock at 1731578245151Running coprocessor pre-close hooks at 1731578245151Disabling compacts and flushes for region at 1731578245151Disabling writes for close at 1731578245151Obtaining lock to block concurrent updates at 1731578245152 (+1 ms)Preparing flush snapshotting stores in 5cfcadd4fe90d6ef701c5bb6fcab1b9f at 1731578245152Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1731578245152Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. at 1731578245153 (+1 ms)Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info: creating writer at 1731578245153Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info: appending metadata at 1731578245157 (+4 ms)Flushing 5cfcadd4fe90d6ef701c5bb6fcab1b9f/info: closing flushed file at 1731578245157Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7c0e06d7: reopening flushed file at 1731578245169 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 5cfcadd4fe90d6ef701c5bb6fcab1b9f in 27ms, sequenceid=22, compaction requested=true at 1731578245178 (+9 ms)Writing region close event to WAL at 1731578245197 (+19 ms)Running coprocessor post-close hooks at 1731578245200 (+3 ms)Closed at 1731578245200 2024-11-14T09:57:25,201 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1731578194716.5cfcadd4fe90d6ef701c5bb6fcab1b9f. 
2024-11-14T09:57:25,210 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/ns/844076c8c6f74f048ce4cf8ce8f785a2 is 43, key is default/ns:d/1731578194605/Put/seqid=0 2024-11-14T09:57:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741847_1023 (size=5153) 2024-11-14T09:57:25,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741847_1023 (size=5153) 2024-11-14T09:57:25,215 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/ns/844076c8c6f74f048ce4cf8ce8f785a2 2024-11-14T09:57:25,234 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/table/be50cb59f50a4f98864462a50b7d30aa is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1731578195093/Put/seqid=0 2024-11-14T09:57:25,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741848_1024 (size=5508) 2024-11-14T09:57:25,239 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741848_1024 (size=5508) 2024-11-14T09:57:25,239 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/table/be50cb59f50a4f98864462a50b7d30aa 2024-11-14T09:57:25,245 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/info/e27553b7d126480ebd25f352705b3419 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/info/e27553b7d126480ebd25f352705b3419 2024-11-14T09:57:25,250 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/info/e27553b7d126480ebd25f352705b3419, entries=10, sequenceid=11, filesize=7.1 K 2024-11-14T09:57:25,251 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/ns/844076c8c6f74f048ce4cf8ce8f785a2 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/ns/844076c8c6f74f048ce4cf8ce8f785a2 2024-11-14T09:57:25,256 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/ns/844076c8c6f74f048ce4cf8ce8f785a2, entries=2, sequenceid=11, filesize=5.0 K 2024-11-14T09:57:25,257 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/.tmp/table/be50cb59f50a4f98864462a50b7d30aa as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/table/be50cb59f50a4f98864462a50b7d30aa 2024-11-14T09:57:25,263 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/table/be50cb59f50a4f98864462a50b7d30aa, entries=2, sequenceid=11, filesize=5.4 K 2024-11-14T09:57:25,264 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 112ms, sequenceid=11, compaction requested=false 2024-11-14T09:57:25,269 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-14T09:57:25,269 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:57:25,269 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:57:25,270 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578245152Running coprocessor pre-close hooks at 1731578245152Disabling compacts and flushes for region at 1731578245152Disabling writes for close at 1731578245152Obtaining lock to block concurrent updates at 1731578245152Preparing flush snapshotting stores in 1588230740 at 1731578245152Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1731578245153 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1731578245154 (+1 ms)Flushing 1588230740/info: creating writer at 1731578245154Flushing 1588230740/info: appending metadata at 1731578245172 (+18 ms)Flushing 1588230740/info: closing flushed file at 1731578245172Flushing 1588230740/ns: creating writer at 1731578245195 (+23 ms)Flushing 1588230740/ns: appending metadata at 1731578245209 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1731578245209Flushing 1588230740/table: creating writer at 1731578245220 (+11 ms)Flushing 1588230740/table: appending metadata at 1731578245233 (+13 ms)Flushing 1588230740/table: closing flushed file at 1731578245233Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2bf5bb90: reopening flushed file at 1731578245244 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cafd9b7: reopening flushed file at 1731578245250 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3e83d475: reopening flushed file at 1731578245256 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 112ms, sequenceid=11, compaction requested=false at 1731578245264 (+8 ms)Writing region close event to WAL at 1731578245266 (+2 ms)Running coprocessor post-close hooks at 1731578245269 (+3 ms)Closed at 1731578245269 2024-11-14T09:57:25,270 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:57:25,352 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,41757,1731578193468; all regions closed. 2024-11-14T09:57:25,353 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,353 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,353 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,353 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,353 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741834_1010 (size=3306) 2024-11-14T09:57:25,355 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741834_1010 (size=3306) 2024-11-14T09:57:25,358 DEBUG [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs 2024-11-14T09:57:25,358 INFO [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C41757%2C1731578193468.meta:.meta(num 1731578194515) 2024-11-14T09:57:25,358 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,358 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,358 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,358 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,359 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741844_1020 (size=1252) 2024-11-14T09:57:25,361 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741844_1020 (size=1252) 2024-11-14T09:57:25,364 DEBUG [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/oldWALs 2024-11-14T09:57:25,364 INFO [RS:0;defc576eb6b7:41757 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C41757%2C1731578193468:(num 1731578245058) 2024-11-14T09:57:25,364 DEBUG [RS:0;defc576eb6b7:41757 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:25,364 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:57:25,364 INFO [RS:0;defc576eb6b7:41757 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:57:25,364 INFO [RS:0;defc576eb6b7:41757 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had 
[ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-14T09:57:25,364 INFO [RS:0;defc576eb6b7:41757 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:57:25,364 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:57:25,365 INFO [RS:0;defc576eb6b7:41757 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41757 2024-11-14T09:57:25,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:57:25,377 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,41757,1731578193468 2024-11-14T09:57:25,377 INFO [RS:0;defc576eb6b7:41757 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:57:25,390 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,41757,1731578193468] 2024-11-14T09:57:25,400 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,41757,1731578193468 already deleted, retry=false 2024-11-14T09:57:25,400 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,41757,1731578193468 expired; onlineServers=0 2024-11-14T09:57:25,400 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,44985,1731578193289' ***** 2024-11-14T09:57:25,400 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:57:25,400 INFO [M:0;defc576eb6b7:44985 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:57:25,400 INFO [M:0;defc576eb6b7:44985 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:57:25,401 DEBUG [M:0;defc576eb6b7:44985 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:57:25,401 DEBUG [M:0;defc576eb6b7:44985 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:57:25,401 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-14T09:57:25,401 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578193834 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578193834,5,FailOnTimeoutGroup] 2024-11-14T09:57:25,401 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578193834 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578193834,5,FailOnTimeoutGroup] 2024-11-14T09:57:25,401 INFO [M:0;defc576eb6b7:44985 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:57:25,401 INFO [M:0;defc576eb6b7:44985 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:57:25,401 DEBUG [M:0;defc576eb6b7:44985 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:57:25,401 INFO [M:0;defc576eb6b7:44985 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:57:25,401 INFO [M:0;defc576eb6b7:44985 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:57:25,401 INFO [M:0;defc576eb6b7:44985 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:57:25,401 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:57:25,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:25,409 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:25,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:57:25,411 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:25,411 DEBUG [M:0;defc576eb6b7:44985 {}] zookeeper.ZKUtil(347): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:57:25,411 WARN [M:0;defc576eb6b7:44985 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:57:25,412 INFO [M:0;defc576eb6b7:44985 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/.lastflushedseqids 2024-11-14T09:57:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741849_1025 (size=130) 2024-11-14T09:57:25,419 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741849_1025 (size=130) 2024-11-14T09:57:25,420 INFO [M:0;defc576eb6b7:44985 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:57:25,420 INFO [M:0;defc576eb6b7:44985 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:57:25,420 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:57:25,420 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:57:25,420 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:57:25,420 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:57:25,420 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:57:25,420 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.54 KB heapSize=54.91 KB 2024-11-14T09:57:25,437 DEBUG [M:0;defc576eb6b7:44985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98c3274d50514081b6b3316e931734fd is 82, key is hbase:meta,,1/info:regioninfo/1731578194551/Put/seqid=0 2024-11-14T09:57:25,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741850_1026 (size=5672) 2024-11-14T09:57:25,441 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741850_1026 (size=5672) 2024-11-14T09:57:25,442 INFO [M:0;defc576eb6b7:44985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98c3274d50514081b6b3316e931734fd 2024-11-14T09:57:25,465 DEBUG [M:0;defc576eb6b7:44985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b864f7b751af435eb2eed4683be7381d is 797, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731578195098/Put/seqid=0 2024-11-14T09:57:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741851_1027 (size=7818) 2024-11-14T09:57:25,469 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741851_1027 (size=7818) 2024-11-14T09:57:25,470 INFO [M:0;defc576eb6b7:44985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.94 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b864f7b751af435eb2eed4683be7381d 2024-11-14T09:57:25,475 INFO [M:0;defc576eb6b7:44985 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b864f7b751af435eb2eed4683be7381d 2024-11-14T09:57:25,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:57:25,490 INFO [RS:0;defc576eb6b7:41757 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:57:25,490 INFO [RS:0;defc576eb6b7:41757 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,41757,1731578193468; zookeeper connection closed. 
2024-11-14T09:57:25,490 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41757-0x10138c6611c0001, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:57:25,490 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@25ff5656 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@25ff5656 2024-11-14T09:57:25,491 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:57:25,491 DEBUG [M:0;defc576eb6b7:44985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5465d4735f8e4223a8af41f216d07a3e is 69, key is defc576eb6b7,41757,1731578193468/rs:state/1731578193938/Put/seqid=0 2024-11-14T09:57:25,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741852_1028 (size=5156) 2024-11-14T09:57:25,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741852_1028 (size=5156) 2024-11-14T09:57:25,496 INFO [M:0;defc576eb6b7:44985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5465d4735f8e4223a8af41f216d07a3e 2024-11-14T09:57:25,516 DEBUG [M:0;defc576eb6b7:44985 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5820c7e597e42978f659c217f065780 is 52, key is load_balancer_on/state:d/1731578194712/Put/seqid=0 2024-11-14T09:57:25,520 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741853_1029 (size=5056) 2024-11-14T09:57:25,521 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741853_1029 (size=5056) 2024-11-14T09:57:25,521 INFO [M:0;defc576eb6b7:44985 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5820c7e597e42978f659c217f065780 2024-11-14T09:57:25,527 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/98c3274d50514081b6b3316e931734fd as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98c3274d50514081b6b3316e931734fd 2024-11-14T09:57:25,532 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/98c3274d50514081b6b3316e931734fd, entries=8, sequenceid=121, 
filesize=5.5 K 2024-11-14T09:57:25,533 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b864f7b751af435eb2eed4683be7381d as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b864f7b751af435eb2eed4683be7381d 2024-11-14T09:57:25,539 INFO [M:0;defc576eb6b7:44985 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for b864f7b751af435eb2eed4683be7381d 2024-11-14T09:57:25,539 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b864f7b751af435eb2eed4683be7381d, entries=14, sequenceid=121, filesize=7.6 K 2024-11-14T09:57:25,541 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5465d4735f8e4223a8af41f216d07a3e as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5465d4735f8e4223a8af41f216d07a3e 2024-11-14T09:57:25,547 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5465d4735f8e4223a8af41f216d07a3e, entries=1, sequenceid=121, filesize=5.0 K 2024-11-14T09:57:25,548 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/e5820c7e597e42978f659c217f065780 as hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e5820c7e597e42978f659c217f065780 2024-11-14T09:57:25,554 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:43215/user/jenkins/test-data/1bbe1211-e9eb-99e3-2b09-3be8bbb3caff/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/e5820c7e597e42978f659c217f065780, entries=1, sequenceid=121, filesize=4.9 K 2024-11-14T09:57:25,555 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false 2024-11-14T09:57:25,557 INFO [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-14T09:57:25,557 DEBUG [M:0;defc576eb6b7:44985 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578245420Disabling compacts and flushes for region at 1731578245420Disabling writes for close at 1731578245420Obtaining lock to block concurrent updates at 1731578245420Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578245420Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44590, getHeapSize=56168, getOffHeapSize=0, getCellsCount=140 at 1731578245421 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578245422 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578245422Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578245437 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578245437Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578245447 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578245464 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578245464Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578245475 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578245491 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578245491Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578245500 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578245516 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578245516Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4bcbeb2f: reopening flushed file at 1731578245526 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@223c5d0a: reopening flushed file at 1731578245532 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c67e095: reopening flushed file at 1731578245540 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2245e89a: reopening flushed file at 1731578245547 (+7 ms)Finished flush of dataSize ~43.54 KB/44590, heapSize ~54.85 KB/56168, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 135ms, sequenceid=121, compaction requested=false at 1731578245555 (+8 ms)Writing region close event to WAL at 1731578245557 (+2 ms)Closed at 1731578245557 2024-11-14T09:57:25,560 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,560 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,560 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:57:25,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37477 is added to blk_1073741830_1006 (size=52987) 2024-11-14T09:57:25,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43217 is added to blk_1073741830_1006 (size=52987) 2024-11-14T09:57:25,563 INFO [M:0;defc576eb6b7:44985 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-14T09:57:25,563 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:57:25,563 INFO [M:0;defc576eb6b7:44985 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44985 2024-11-14T09:57:25,564 INFO [M:0;defc576eb6b7:44985 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:57:25,669 INFO [M:0;defc576eb6b7:44985 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:57:25,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:57:25,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:44985-0x10138c6611c0000, quorum=127.0.0.1:55385, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:57:25,671 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4423b898{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:57:25,672 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c7e4fc9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:57:25,672 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:57:25,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1d69a14c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:57:25,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@435e4405{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,STOPPED} 2024-11-14T09:57:25,673 WARN [BP-1487274196-172.17.0.2-1731578190907 heartbeating to localhost/127.0.0.1:43215 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:57:25,673 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:57:25,674 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:57:25,674 WARN [BP-1487274196-172.17.0.2-1731578190907 heartbeating to localhost/127.0.0.1:43215 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1487274196-172.17.0.2-1731578190907 (Datanode Uuid 10ea4720-0575-4d65-b3e3-aa27bb890a75) service to localhost/127.0.0.1:43215 2024-11-14T09:57:25,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data3/current/BP-1487274196-172.17.0.2-1731578190907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:57:25,674 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data4/current/BP-1487274196-172.17.0.2-1731578190907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:57:25,675 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:57:25,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1ab597ae{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:57:25,677 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6ba04ef4{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:57:25,677 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:57:25,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1e078168{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:57:25,677 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1574660e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,STOPPED} 2024-11-14T09:57:25,679 WARN [BP-1487274196-172.17.0.2-1731578190907 heartbeating to localhost/127.0.0.1:43215 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:57:25,679 WARN [BP-1487274196-172.17.0.2-1731578190907 heartbeating to localhost/127.0.0.1:43215 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1487274196-172.17.0.2-1731578190907 (Datanode Uuid 7cef2426-2e35-4136-8f2c-c45d68b10bd7) service to localhost/127.0.0.1:43215 2024-11-14T09:57:25,680 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data1/current/BP-1487274196-172.17.0.2-1731578190907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:57:25,680 WARN 
[refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/cluster_eb2e4023-7a13-b14d-0bee-1109066e8f93/data/data2/current/BP-1487274196-172.17.0.2-1731578190907 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:57:25,680 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-14T09:57:25,680 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:57:25,680 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:57:25,687 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5c49ee69{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:57:25,688 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@34381312{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:57:25,688 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:57:25,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7165585e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:57:25,689 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2941128e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir/,STOPPED} 2024-11-14T09:57:25,695 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:57:25,721 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:57:25,730 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:43215 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43215 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43215 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:43215 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43215 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43215 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43215 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:43215 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/defc576eb6b7:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:43215 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=242 (was 223) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=3606 (was 3819) 2024-11-14T09:57:25,739 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=242, ProcessCount=11, AvailableMemoryMB=3606 2024-11-14T09:57:25,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.log.dir so I do NOT create it in target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/f45af986-a215-914b-f259-153888a32b3e/hadoop.tmp.dir so I do NOT create it in target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4, deleteOnExit=true 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/test.cache.data in system properties and HBase conf 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.tmp.dir in system properties and HBase conf 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir in system properties and HBase conf 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-14T09:57:25,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-14T09:57:25,740 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:57:25,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:57:25,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:57:25,742 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:57:25,756 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:57:25,971 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:57:26,162 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:57:26,166 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:57:26,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:57:26,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:57:26,167 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:57:26,168 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:57:26,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34e3d9e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:57:26,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a2bf3b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:57:26,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7a75c30e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/java.io.tmpdir/jetty-localhost-34337-hadoop-hdfs-3_4_1-tests_jar-_-any-4155656836815727897/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:57:26,273 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1a0adda6{HTTP/1.1, (http/1.1)}{localhost:34337} 2024-11-14T09:57:26,273 INFO [Time-limited test {}] server.Server(415): Started @250908ms 2024-11-14T09:57:26,286 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:57:26,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:26,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:26,547 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:57:26,550 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:57:26,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:57:26,551 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:57:26,551 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:57:26,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4438ca54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:57:26,552 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79974a7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:57:26,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6901ebc1{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/java.io.tmpdir/jetty-localhost-44685-hadoop-hdfs-3_4_1-tests_jar-_-any-1946685572126873975/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:57:26,654 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4d7def5f{HTTP/1.1, (http/1.1)}{localhost:44685} 2024-11-14T09:57:26,654 INFO [Time-limited test {}] server.Server(415): Started @251289ms 2024-11-14T09:57:26,655 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:57:26,698 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:57:26,700 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:57:26,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:57:26,708 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:57:26,708 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:57:26,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5aa0bab9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:57:26,709 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3af484fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:57:26,816 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7f9d8b97{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/java.io.tmpdir/jetty-localhost-44705-hadoop-hdfs-3_4_1-tests_jar-_-any-7032823525045754113/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:57:26,816 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1435a214{HTTP/1.1, (http/1.1)}{localhost:44705} 2024-11-14T09:57:26,816 INFO [Time-limited test {}] server.Server(415): Started @251451ms 2024-11-14T09:57:26,817 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:57:27,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:27,410 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:27,911 WARN [Thread-1985 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data1/current/BP-1487981552-172.17.0.2-1731578245760/current, will proceed with Du for space computation calculation, 2024-11-14T09:57:27,911 WARN [Thread-1986 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data2/current/BP-1487981552-172.17.0.2-1731578245760/current, will proceed with Du for space computation calculation, 2024-11-14T09:57:27,927 WARN [Thread-1949 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:57:27,929 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6e42f83d53d1b34 with lease ID 0x752d3ff8e799c564: Processing first storage report for DS-c77b8961-bdf2-4504-8a8d-0816d8ad42aa from datanode DatanodeRegistration(127.0.0.1:43971, datanodeUuid=6dd64b2f-023d-4e82-8672-1c9526c8ffd9, infoPort=39729, infoSecurePort=0, ipcPort=43679, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760) 2024-11-14T09:57:27,929 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6e42f83d53d1b34 with lease ID 0x752d3ff8e799c564: from storage DS-c77b8961-bdf2-4504-8a8d-0816d8ad42aa node DatanodeRegistration(127.0.0.1:43971, datanodeUuid=6dd64b2f-023d-4e82-8672-1c9526c8ffd9, infoPort=39729, infoSecurePort=0, ipcPort=43679, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:57:27,929 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf6e42f83d53d1b34 with lease ID 0x752d3ff8e799c564: Processing first storage report for DS-8d39e627-feb7-4c46-84ee-9d53a447a953 from datanode DatanodeRegistration(127.0.0.1:43971, datanodeUuid=6dd64b2f-023d-4e82-8672-1c9526c8ffd9, infoPort=39729, infoSecurePort=0, ipcPort=43679, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760) 2024-11-14T09:57:27,929 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf6e42f83d53d1b34 with lease ID 0x752d3ff8e799c564: from storage DS-8d39e627-feb7-4c46-84ee-9d53a447a953 node DatanodeRegistration(127.0.0.1:43971, datanodeUuid=6dd64b2f-023d-4e82-8672-1c9526c8ffd9, infoPort=39729, infoSecurePort=0, ipcPort=43679, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:57:28,290 WARN [Thread-1996 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data3/current/BP-1487981552-172.17.0.2-1731578245760/current, will proceed with Du for space computation calculation, 2024-11-14T09:57:28,290 WARN [Thread-1997 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data4/current/BP-1487981552-172.17.0.2-1731578245760/current, will proceed with Du for space computation calculation, 2024-11-14T09:57:28,309 WARN [Thread-1972 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:57:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9dd018dde14f4363 with lease ID 0x752d3ff8e799c565: Processing first storage report for DS-7f9ed1b6-5746-49e7-98c9-67d7dbbafeaa from datanode DatanodeRegistration(127.0.0.1:34273, datanodeUuid=82059d17-3cdc-4403-b90b-51732aa1ecdc, infoPort=33689, infoSecurePort=0, ipcPort=41133, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760) 2024-11-14T09:57:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9dd018dde14f4363 with lease ID 0x752d3ff8e799c565: from storage DS-7f9ed1b6-5746-49e7-98c9-67d7dbbafeaa node DatanodeRegistration(127.0.0.1:34273, datanodeUuid=82059d17-3cdc-4403-b90b-51732aa1ecdc, infoPort=33689, infoSecurePort=0, ipcPort=41133, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:57:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9dd018dde14f4363 with lease ID 0x752d3ff8e799c565: Processing first storage report for DS-3b877b5d-5cb1-40ea-bdd9-a9055361c688 from datanode DatanodeRegistration(127.0.0.1:34273, datanodeUuid=82059d17-3cdc-4403-b90b-51732aa1ecdc, infoPort=33689, infoSecurePort=0, ipcPort=41133, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760) 2024-11-14T09:57:28,311 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9dd018dde14f4363 with lease ID 0x752d3ff8e799c565: from storage DS-3b877b5d-5cb1-40ea-bdd9-a9055361c688 node DatanodeRegistration(127.0.0.1:34273, datanodeUuid=82059d17-3cdc-4403-b90b-51732aa1ecdc, infoPort=33689, infoSecurePort=0, ipcPort=41133, storageInfo=lv=-57;cid=testClusterID;nsid=1922865472;c=1731578245760), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:57:28,350 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b 2024-11-14T09:57:28,354 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/zookeeper_0, clientPort=55186, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:57:28,354 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=55186 2024-11-14T09:57:28,355 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,356 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:57:28,367 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:57:28,368 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe with version=8 2024-11-14T09:57:28,368 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:57:28,370 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:57:28,371 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:57:28,372 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:46299 2024-11-14T09:57:28,373 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:46299 connecting to ZooKeeper ensemble=127.0.0.1:55186 2024-11-14T09:57:28,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:28,411 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:28,432 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:462990x0, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:57:28,432 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:46299-0x10138c7384b0000 connected 2024-11-14T09:57:28,632 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,634 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,637 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:57:28,637 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe, hbase.cluster.distributed=false 2024-11-14T09:57:28,639 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:57:28,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=46299 2024-11-14T09:57:28,640 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=46299 2024-11-14T09:57:28,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=46299 2024-11-14T09:57:28,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=46299 2024-11-14T09:57:28,641 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=46299 2024-11-14T09:57:28,658 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:57:28,658 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:57:28,659 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34147 2024-11-14T09:57:28,660 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34147 connecting to ZooKeeper ensemble=127.0.0.1:55186 2024-11-14T09:57:28,661 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,662 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,674 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:341470x0, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:57:28,674 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:341470x0, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:57:28,674 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34147-0x10138c7384b0001 connected 2024-11-14T09:57:28,675 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:57:28,675 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:57:28,676 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:57:28,677 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:57:28,677 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34147 2024-11-14T09:57:28,678 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34147 2024-11-14T09:57:28,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34147 2024-11-14T09:57:28,678 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34147 2024-11-14T09:57:28,679 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34147 2024-11-14T09:57:28,692 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:46299 2024-11-14T09:57:28,693 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:57:28,705 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:57:28,706 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:57:28,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,716 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,716 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:57:28,717 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,46299,1731578248370 from backup master directory 2024-11-14T09:57:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:57:28,726 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,726 DEBUG [Time-limited test-EventThread {}] 
zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:57:28,726 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:57:28,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,731 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/hbase.id] with ID: e382a50c-b256-45f7-b09f-0077af87abf0 2024-11-14T09:57:28,731 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/.tmp/hbase.id 2024-11-14T09:57:28,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:57:28,736 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:57:28,737 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/.tmp/hbase.id]:[hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/hbase.id] 2024-11-14T09:57:28,747 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:28,747 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:57:28,748 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
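
The repeated "WARN [Close-WAL-Writer-0] util.RecoverLeaseFSUtils(258): Failed invocation ... java.lang.reflect.InvocationTargetException: null" entries in this section come from a reflective call: RecoverLeaseFSUtils.isFileClosed invokes the HDFS isFileClosed check via java.lang.reflect.Method.invoke, so the real failure (java.io.IOException: Filesystem closed) only appears as the *cause* of an InvocationTargetException whose own message is null, which is exactly what the log prints. Below is a minimal, self-contained sketch of that wrapping behavior; the FlakyFs class and the path string are illustrative stand-ins, not HBase or HDFS code.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveInvokeDemo {

    // Illustrative stand-in for a filesystem whose underlying client is already closed,
    // so the isFileClosed check itself fails.
    public static class FlakyFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        FlakyFs fs = new FlakyFs();
        Method isFileClosed = FlakyFs.class.getMethod("isFileClosed", String.class);
        try {
            isFileClosed.invoke(fs, "/tmp/example.wal"); // illustrative path
        } catch (InvocationTargetException e) {
            // The wrapper has no message of its own, so logging it prints "null"...
            System.out.println("wrapper message: " + e.getMessage());
            // ...and the real error is only reachable through getCause().
            System.out.println("cause: " + e.getCause());
        }
    }
}
```

Because the close-WAL-writer path retries lease recovery, the same two WAL paths reappear with identical stack traces roughly once per second in this section until the surrounding minicluster is torn down.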
2024-11-14T09:57:28,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:57:28,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:57:28,764 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:57:28,765 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:57:28,765 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:57:28,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:57:28,772 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:57:28,773 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store 2024-11-14T09:57:28,780 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:57:28,782 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:57:28,783 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:57:28,783 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
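The master:store descriptor printed above gives the 'info' family VERSIONS => 3, IN_MEMORY => true, BLOOMFILTER => ROWCOL, DATA_BLOCK_ENCODING => ROW_INDEX_V1 and an 8 KB block size. As a rough illustration only (MasterRegion builds this table internally, not through the client API), the same family settings could be expressed with the public HBase TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API, assuming a hypothetical table name:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreDescriptorSketch {
  // Builds a descriptor whose 'info' family mirrors the attributes the log prints for master:store.
  public static TableDescriptor build() {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                   // VERSIONS => '3'
        .setInMemory(true)                                   // IN_MEMORY => 'true'
        .setBloomFilterType(BloomType.ROWCOL)                // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)// DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setBlocksize(8192)                                  // BLOCKSIZE => '8192 B (8KB)'
        .build();
    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store"))   // illustrative name, not master:store
        .setColumnFamily(info)
        .build();
  }
}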
2024-11-14T09:57:28,783 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578248783Disabling compacts and flushes for region at 1731578248783Disabling writes for close at 1731578248783Writing region close event to WAL at 1731578248783Closed at 1731578248783 2024-11-14T09:57:28,784 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/.initializing 2024-11-14T09:57:28,784 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/WALs/defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,787 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C46299%2C1731578248370, suffix=, logDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/WALs/defc576eb6b7,46299,1731578248370, archiveDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/oldWALs, maxLogs=10 2024-11-14T09:57:28,787 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C46299%2C1731578248370.1731578248787 2024-11-14T09:57:28,792 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/WALs/defc576eb6b7,46299,1731578248370/defc576eb6b7%2C46299%2C1731578248370.1731578248787 2024-11-14T09:57:28,793 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33689:33689),(127.0.0.1/127.0.0.1:39729:39729)] 2024-11-14T09:57:28,793 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:57:28,793 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:28,793 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,793 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,795 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,796 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:57:28,796 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:28,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:28,797 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,798 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:57:28,798 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:28,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:28,799 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,800 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:57:28,800 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:28,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:28,801 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,802 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:57:28,802 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:28,803 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:28,803 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,804 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,804 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,805 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,805 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,806 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:57:28,807 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:57:28,809 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:57:28,809 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=795290, jitterRate=0.011263757944107056}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:57:28,810 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578248794Initializing all the Stores at 1731578248794Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578248794Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578248795 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578248795Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578248795Cleaning up temporary data from old regions at 1731578248805 (+10 ms)Region opened successfully at 1731578248810 (+5 ms) 2024-11-14T09:57:28,810 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:57:28,813 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3da5440, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:57:28,814 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:57:28,814 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:57:28,814 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:57:28,814 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:57:28,815 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:57:28,815 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:57:28,815 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:57:28,817 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:57:28,818 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:57:28,830 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:57:28,830 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:57:28,831 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:57:28,842 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:57:28,843 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:57:28,844 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:57:28,853 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:57:28,854 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:57:28,863 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:57:28,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:57:28,874 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:57:28,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:57:28,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,884 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:57:28,885 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,885 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,46299,1731578248370, sessionid=0x10138c7384b0000, setting cluster-up flag (Was=false) 2024-11-14T09:57:28,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,937 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:57:28,938 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,958 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:28,990 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:57:28,993 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,46299,1731578248370 2024-11-14T09:57:28,994 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:57:28,996 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:57:28,997 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:57:28,997 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:57:28,997 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,46299,1731578248370 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:57:28,999 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:57:28,999 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:57:28,999 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:57:29,000 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:57:29,000 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:57:29,000 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,000 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:57:29,000 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:57:29,003 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578279003 2024-11-14T09:57:29,003 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:57:29,004 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,004 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:57:29,004 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:57:29,006 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,006 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:57:29,008 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:57:29,008 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:57:29,008 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:57:29,009 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:57:29,009 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:57:29,009 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578249009,5,FailOnTimeoutGroup] 2024-11-14T09:57:29,010 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578249009,5,FailOnTimeoutGroup] 2024-11-14T09:57:29,010 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,010 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:57:29,010 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,010 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
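The ChoreService entries above schedule the cleaners at fixed periods: LogsCleaner and HFileCleaner every 600000 ms, SnapshotCleaner every 1800000 ms. A minimal JDK analogue of that scheduling (not HBase's ChoreService; the class name and runnables are placeholders) would be:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class CleanerChoreSketch {
  public static void main(String[] args) {
    ScheduledExecutorService chores = Executors.newScheduledThreadPool(1);
    // Periods taken from the log above: 600000 ms (10 min) for the log and HFile cleaners,
    // 1800000 ms (30 min) for the snapshot cleaner.
    chores.scheduleAtFixedRate(() -> System.out.println("run log cleaner"),
        0, 600_000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(() -> System.out.println("run hfile cleaner"),
        0, 600_000, TimeUnit.MILLISECONDS);
    chores.scheduleAtFixedRate(() -> System.out.println("run snapshot cleaner"),
        0, 1_800_000, TimeUnit.MILLISECONDS);
  }
}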
2024-11-14T09:57:29,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:57:29,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:57:29,013 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:57:29,013 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe 2024-11-14T09:57:29,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:57:29,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:57:29,019 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:29,020 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:57:29,022 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:57:29,022 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,022 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:57:29,023 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:57:29,023 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,024 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:57:29,025 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:57:29,025 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,025 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,026 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:57:29,027 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:57:29,027 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,027 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,027 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:57:29,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740 2024-11-14T09:57:29,028 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740 2024-11-14T09:57:29,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:57:29,029 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:57:29,030 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
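The FlushLargeStoresPolicy line just above falls back to the region memstore flush size divided by the number of column families when hbase.hregion.percolumnfamilyflush.size.lower.bound is unset: for master:store that was 134217728 / 4 families = 33554432 bytes (32 MB), and for hbase:meta the logged 16 MB across its four families implies a 64 MB flush size (inferred from the log, not printed directly). A tiny sketch of that arithmetic, with hypothetical names:

public class FlushLowerBoundSketch {
  // Mirrors the fallback the log describes: region memstore flush size divided by family count.
  static long lowerBound(long memstoreFlushSizeBytes, int numFamilies) {
    return memstoreFlushSizeBytes / numFamilies;
  }

  public static void main(String[] args) {
    // master:store (info, proc, rs, state): 134217728 / 4 = 33554432 bytes = 32 MB, as logged.
    System.out.println(lowerBound(134_217_728L, 4));
    // hbase:meta (info, ns, rep_barrier, table): 67108864 / 4 = 16777216 bytes = 16 MB, as logged
    // (the 64 MB input is inferred from the 16 MB result and the four families).
    System.out.println(lowerBound(67_108_864L, 4));
  }
}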
2024-11-14T09:57:29,030 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:57:29,033 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:57:29,033 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=757732, jitterRate=-0.036495089530944824}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578249019Initializing all the Stores at 1731578249020 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249020Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249020Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578249020Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249020Cleaning up temporary data from old regions at 1731578249029 (+9 ms)Region opened successfully at 1731578249034 (+5 ms) 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:57:29,034 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:57:29,034 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:57:29,034 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578249034Disabling compacts and flushes for region at 1731578249034Disabling writes for close at 1731578249034Writing region 
close event to WAL at 1731578249034Closed at 1731578249034 2024-11-14T09:57:29,035 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:57:29,035 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:57:29,036 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:57:29,037 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:57:29,038 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:57:29,081 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(746): ClusterId : e382a50c-b256-45f7-b09f-0077af87abf0 2024-11-14T09:57:29,081 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:57:29,094 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:57:29,094 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:57:29,107 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:57:29,107 DEBUG [RS:0;defc576eb6b7:34147 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f6a4b45, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:57:29,128 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:34147 2024-11-14T09:57:29,128 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:57:29,128 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:57:29,128 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(832): About to register with Master. 
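The AbstractRpcClient lines above list the connection options tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000 and readTO=20000. At the plain JDK socket level those map roughly onto the calls below (a sketch with hypothetical names; the logged writeTO=60000 has no direct Socket option and would be enforced at a higher layer):

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.Socket;

public class RpcSocketOptionsSketch {
  // Applies the connection options reported by the AbstractRpcClient log line.
  public static Socket connect(String host, int port) throws IOException {
    Socket socket = new Socket();
    socket.setKeepAlive(true);                                   // tcpKeepAlive=true
    socket.setTcpNoDelay(true);                                  // tcpNoDelay=true
    socket.connect(new InetSocketAddress(host, port), 10_000);   // connectTO=10000 ms
    socket.setSoTimeout(20_000);                                 // readTO=20000 ms: max blocking read
    return socket;
  }
}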
2024-11-14T09:57:29,129 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,46299,1731578248370 with port=34147, startcode=1731578248657 2024-11-14T09:57:29,129 DEBUG [RS:0;defc576eb6b7:34147 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:57:29,131 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53753, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:57:29,132 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46299 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,132 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46299 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,133 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe 2024-11-14T09:57:29,133 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:41179 2024-11-14T09:57:29,133 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:57:29,145 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:57:29,146 DEBUG [RS:0;defc576eb6b7:34147 {}] zookeeper.ZKUtil(111): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,146 WARN [RS:0;defc576eb6b7:34147 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:57:29,146 INFO [RS:0;defc576eb6b7:34147 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:57:29,146 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,146 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,34147,1731578248657] 2024-11-14T09:57:29,149 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:57:29,151 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:57:29,151 INFO [RS:0;defc576eb6b7:34147 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:57:29,151 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-14T09:57:29,151 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:57:29,152 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:57:29,152 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,152 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:57:29,153 DEBUG [RS:0;defc576eb6b7:34147 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:57:29,153 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-14T09:57:29,153 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,153 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,154 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,154 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,154 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34147,1731578248657-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:57:29,171 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:57:29,171 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34147,1731578248657-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,171 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,171 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.Replication(171): defc576eb6b7,34147,1731578248657 started 2024-11-14T09:57:29,183 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,183 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,34147,1731578248657, RpcServer on defc576eb6b7/172.17.0.2:34147, sessionid=0x10138c7384b0001 2024-11-14T09:57:29,183 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:57:29,183 DEBUG [RS:0;defc576eb6b7:34147 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,183 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,34147,1731578248657' 2024-11-14T09:57:29,183 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,34147,1731578248657' 2024-11-14T09:57:29,184 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:57:29,185 DEBUG 
[RS:0;defc576eb6b7:34147 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:57:29,185 DEBUG [RS:0;defc576eb6b7:34147 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:57:29,185 INFO [RS:0;defc576eb6b7:34147 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:57:29,185 INFO [RS:0;defc576eb6b7:34147 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:57:29,188 WARN [defc576eb6b7:46299 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:57:29,288 INFO [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C34147%2C1731578248657, suffix=, logDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657, archiveDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs, maxLogs=32 2024-11-14T09:57:29,289 INFO [RS:0;defc576eb6b7:34147 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C34147%2C1731578248657.1731578249289 2024-11-14T09:57:29,295 INFO [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578249289 2024-11-14T09:57:29,296 DEBUG [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33689:33689),(127.0.0.1/127.0.0.1:39729:39729)] 2024-11-14T09:57:29,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:29,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:29,438 DEBUG [defc576eb6b7:46299 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:57:29,439 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,440 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,34147,1731578248657, state=OPENING 2024-11-14T09:57:29,452 DEBUG [PEWorker-4 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:57:29,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:29,463 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:57:29,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:57:29,464 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:57:29,464 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:57:29,464 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,34147,1731578248657}] 2024-11-14T09:57:29,617 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:57:29,619 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51683, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:57:29,622 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:57:29,622 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:57:29,624 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C34147%2C1731578248657.meta, suffix=.meta, logDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657, archiveDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs, maxLogs=32 2024-11-14T09:57:29,624 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C34147%2C1731578248657.meta.1731578249624.meta 2024-11-14T09:57:29,636 INFO 
[RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.meta.1731578249624.meta 2024-11-14T09:57:29,648 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:33689:33689),(127.0.0.1/127.0.0.1:39729:39729)] 2024-11-14T09:57:29,648 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:57:29,649 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:57:29,649 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:57:29,650 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:57:29,651 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:57:29,651 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,651 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:57:29,652 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:57:29,652 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,653 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:57:29,653 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:57:29,653 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,654 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,654 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:57:29,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:57:29,654 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:57:29,655 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:57:29,655 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740 2024-11-14T09:57:29,656 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740 2024-11-14T09:57:29,657 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:57:29,657 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:57:29,658 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
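The FlushLargeStoresPolicy message above notes that hbase:meta has no per-column-family flush lower bound in its descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). As a sketch only, with a hypothetical table name and value (only the property key is quoted from the log), that bound can be supplied on a table descriptor like this:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class PerFamilyFlushBound {
      public static TableDescriptor withExplicitLowerBound() {
        // "someTable" is a hypothetical name for illustration only.
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("someTable"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            // The key FlushLargeStoresPolicy looks for in the descriptor,
            // quoted from the log line above; the 16 MB value is made up.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                String.valueOf(16L * 1024 * 1024))
            .build();
      }
    }
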
2024-11-14T09:57:29,659 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:57:29,660 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=781100, jitterRate=-0.006780654191970825}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:57:29,660 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:57:29,661 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578249649Writing region info on filesystem at 1731578249649Initializing all the Stores at 1731578249650 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249650Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249650Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578249650Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578249650Cleaning up temporary data from old regions at 1731578249657 (+7 ms)Running coprocessor post-open hooks at 1731578249660 (+3 ms)Region opened successfully at 1731578249661 (+1 ms) 2024-11-14T09:57:29,662 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578249617 2024-11-14T09:57:29,664 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:57:29,664 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:57:29,664 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,665 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,34147,1731578248657, state=OPEN 2024-11-14T09:57:29,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:57:29,708 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:57:29,708 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:57:29,709 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:57:29,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:57:29,712 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,34147,1731578248657 in 245 msec 2024-11-14T09:57:29,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:57:29,714 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 676 msec 2024-11-14T09:57:29,715 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:57:29,715 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:57:29,716 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:57:29,716 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,34147,1731578248657, seqNum=-1] 2024-11-14T09:57:29,717 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:57:29,718 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42447, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:57:29,724 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 727 msec 2024-11-14T09:57:29,724 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578249724, completionTime=-1 2024-11-14T09:57:29,725 INFO 
[master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:57:29,725 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578309727 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578369727 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,727 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:46299, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,728 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,728 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,730 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.005sec 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:57:29,732 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:57:29,735 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:57:29,735 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:57:29,735 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,46299,1731578248370-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:57:29,781 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@78c409a4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:57:29,781 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,46299,-1 for getting cluster id 2024-11-14T09:57:29,781 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:57:29,782 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'e382a50c-b256-45f7-b09f-0077af87abf0' 2024-11-14T09:57:29,783 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:57:29,783 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "e382a50c-b256-45f7-b09f-0077af87abf0" 2024-11-14T09:57:29,783 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@533a12fb, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:57:29,783 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,46299,-1] 2024-11-14T09:57:29,783 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:57:29,784 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:57:29,785 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40326, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:57:29,785 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@ddab4f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:57:29,786 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:57:29,787 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,34147,1731578248657, seqNum=-1] 2024-11-14T09:57:29,787 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:57:29,788 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53856, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:57:29,790 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,46299,1731578248370 2024-11-14T09:57:29,790 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:57:29,793 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:57:29,793 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-14T09:57:29,794 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is defc576eb6b7,46299,1731578248370 2024-11-14T09:57:29,794 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@79a93648 2024-11-14T09:57:29,795 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-14T09:57:29,796 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40340, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-14T09:57:29,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-14T09:57:29,796 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
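The two TableDescriptorChecker warnings fire because this log-rolling test runs with an intentionally tiny region max file size (786432 bytes) and memstore flush size (8192 bytes). Whether the test supplies these through the cluster Configuration or the table descriptor is not shown here; a minimal sketch of the Configuration route, using the exact keys quoted in the warnings (class and method names are illustrative), looks like:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TinyRegionSettings {
      public static Configuration apply() {
        Configuration conf = HBaseConfiguration.create();
        // Values quoted by TableDescriptorChecker above; deliberately tiny so the
        // test flushes and splits quickly. Real deployments want far larger numbers.
        conf.setLong("hbase.hregion.max.filesize", 786432L);      // ~768 KB
        conf.setLong("hbase.hregion.memstore.flush.size", 8192L); // 8 KB
        return conf;
      }
    }
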
2024-11-14T09:57:29,797 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:57:29,798 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-14T09:57:29,800 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-14T09:57:29,800 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:29,800 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-14T09:57:29,801 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:57:29,801 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-14T09:57:29,808 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741835_1011 (size=381) 2024-11-14T09:57:29,809 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741835_1011 (size=381) 2024-11-14T09:57:29,813 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 382381bfe8c6861e82c5e0cdf94eb053, NAME => 'TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe 2024-11-14T09:57:29,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741836_1012 (size=64) 2024-11-14T09:57:29,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741836_1012 (size=64) 2024-11-14T09:57:29,820 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:29,820 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 382381bfe8c6861e82c5e0cdf94eb053, disabling compactions & flushes 2024-11-14T09:57:29,820 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:29,820 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:29,820 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. after waiting 0 ms 2024-11-14T09:57:29,821 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:29,821 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:29,821 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 382381bfe8c6861e82c5e0cdf94eb053: Waiting for close lock at 1731578249820Disabling compacts and flushes for region at 1731578249820Disabling writes for close at 1731578249820Writing region close event to WAL at 1731578249821 (+1 ms)Closed at 1731578249821 2024-11-14T09:57:29,822 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-14T09:57:29,823 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731578249822"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578249822"}]},"ts":"1731578249822"} 2024-11-14T09:57:29,825 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
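The create request above carries the full descriptor for 'TestLogRolling-testLogRolling': a single 'info' family with one version and otherwise default attributes. As a client-side sketch only (this is not the test's own code; connection setup and the class name are assumed), the equivalent request through the Admin API would look roughly like:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTestTable {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Mirrors the descriptor in the create request: one 'info' family,
          // a single version, defaults for the remaining attributes.
          admin.createTable(
              TableDescriptorBuilder.newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
                  .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                      .setMaxVersions(1)   // VERSIONS => '1' in the descriptor above
                      .build())
                  .build());
        }
      }
    }

The master then runs the CreateTableProcedure seen in the log: write the FS layout, add the region to hbase:meta, and assign it.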
2024-11-14T09:57:29,826 INFO [PEWorker-4 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-14T09:57:29,826 DEBUG [PEWorker-4 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578249826"}]},"ts":"1731578249826"} 2024-11-14T09:57:29,829 INFO [PEWorker-4 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-14T09:57:29,829 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, ASSIGN}] 2024-11-14T09:57:29,831 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, ASSIGN 2024-11-14T09:57:29,832 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, ASSIGN; state=OFFLINE, location=defc576eb6b7,34147,1731578248657; forceNewPlan=false, retain=false 2024-11-14T09:57:29,982 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=382381bfe8c6861e82c5e0cdf94eb053, regionState=OPENING, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:29,985 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, ASSIGN because future has completed 2024-11-14T09:57:29,986 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657}] 2024-11-14T09:57:30,144 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
2024-11-14T09:57:30,144 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 382381bfe8c6861e82c5e0cdf94eb053, NAME => 'TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:57:30,144 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,145 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:30,145 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,145 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,146 INFO [StoreOpener-382381bfe8c6861e82c5e0cdf94eb053-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,148 INFO [StoreOpener-382381bfe8c6861e82c5e0cdf94eb053-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 382381bfe8c6861e82c5e0cdf94eb053 columnFamilyName info 2024-11-14T09:57:30,148 DEBUG [StoreOpener-382381bfe8c6861e82c5e0cdf94eb053-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:30,149 INFO [StoreOpener-382381bfe8c6861e82c5e0cdf94eb053-1 {}] regionserver.HStore(327): Store=382381bfe8c6861e82c5e0cdf94eb053/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:30,149 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,149 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,150 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,150 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,150 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,152 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,154 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:57:30,155 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 382381bfe8c6861e82c5e0cdf94eb053; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=702343, jitterRate=-0.10692492127418518}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:57:30,155 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:30,156 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 382381bfe8c6861e82c5e0cdf94eb053: Running coprocessor pre-open hook at 1731578250145Writing region info on filesystem at 1731578250145Initializing all the Stores at 1731578250146 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578250146Cleaning up temporary data from old regions at 1731578250150 (+4 ms)Running coprocessor post-open hooks at 1731578250155 (+5 ms)Region opened successfully at 1731578250156 (+1 ms) 2024-11-14T09:57:30,157 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., pid=6, masterSystemTime=1731578250138 2024-11-14T09:57:30,160 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
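With pid=6 finished, TestLogRolling-testLogRolling is open on defc576eb6b7,34147 and ready for client traffic. What the test writes next is not part of this excerpt; the sketch below only illustrates, with made-up row keys, counts, and value sizes, the kind of put load that would exercise the small flush and max-file-size limits configured for this run:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class WriteLoadSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Table table = conn.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
          // Row count and value size are invented; with an 8 KB flush size and a
          // ~768 KB max file size, even modest writes force flushes and WAL rolls.
          for (int i = 0; i < 1000; i++) {
            Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), new byte[256]);
            table.put(put);
          }
        }
      }
    }
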
2024-11-14T09:57:30,160 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:30,161 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=382381bfe8c6861e82c5e0cdf94eb053, regionState=OPEN, openSeqNum=2, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:30,164 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 because future has completed 2024-11-14T09:57:30,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-14T09:57:30,168 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 in 180 msec 2024-11-14T09:57:30,170 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-14T09:57:30,171 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, ASSIGN in 339 msec 2024-11-14T09:57:30,171 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-14T09:57:30,172 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1731578250171"}]},"ts":"1731578250171"} 2024-11-14T09:57:30,174 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-14T09:57:30,175 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-14T09:57:30,177 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 378 msec 2024-11-14T09:57:30,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,201 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,202 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,221 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,222 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,226 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,227 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,229 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:30,412 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:30,735 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:57:30,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,736 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,737 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,738 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,759 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,760 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,765 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:30,768 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:31,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:31,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:32,058 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-14T09:57:32,058 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-14T09:57:32,059 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-14T09:57:32,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:32,413 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:33,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:33,414 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:34,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:34,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:35,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:35,415 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:35,648 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:57:35,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,650 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,651 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,652 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,653 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,681 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,682 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,683 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,687 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,690 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:35,696 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:57:35,696 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-14T09:57:36,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:36,416 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:37,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:37,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:38,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:38,417 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:39,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:39,418 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:39,853 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=46299 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-14T09:57:39,853 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-14T09:57:39,853 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-14T09:57:39,857 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-14T09:57:39,857 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
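The recurring Close-WAL-Writer-0 warnings above come from RecoverLeaseFSUtils invoking DistributedFileSystem#isFileClosed through reflection, so the real failure (java.io.IOException: Filesystem closed) only shows up in the Caused by chain while the logged exception reads "InvocationTargetException: null". The following is a minimal, self-contained JDK-only sketch of that wrapping behaviour; the class and method are hypothetical stand-ins, not HBase code.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Minimal stand-alone sketch (hypothetical names, not HBase code) showing why the
// log prints "InvocationTargetException: null" while the real error sits in "Caused by".
public class ReflectiveInvokeDemo {

    // Stand-in for DistributedFileSystem#isFileClosed when the client is already closed.
    public static boolean isFileClosed(String path) throws IOException {
        throw new IOException("Filesystem closed");
    }

    public static void main(String[] args) throws Exception {
        Method m = ReflectiveInvokeDemo.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(null, "/some/wal/file");
        } catch (InvocationTargetException e) {
            // The reflective wrapper itself carries no message, hence "InvocationTargetException: null".
            System.out.println("wrapper message: " + e.getMessage());
            // The checked exception thrown by the target method is preserved as the cause.
            System.out.println("cause: " + e.getCause());
        }
    }
}

Running it prints a null wrapper message followed by the wrapped IOException, which matches the shape of the traces above.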
2024-11-14T09:57:39,860 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2] 2024-11-14T09:57:39,876 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:39,876 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 382381bfe8c6861e82c5e0cdf94eb053 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:57:39,896 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/888facd8a1af41b18e0448818d0e5e0d is 1080, key is row0001/info:/1731578259862/Put/seqid=0 2024-11-14T09:57:39,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741837_1013 (size=12509) 2024-11-14T09:57:39,901 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741837_1013 (size=12509) 2024-11-14T09:57:39,905 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/888facd8a1af41b18e0448818d0e5e0d 2024-11-14T09:57:39,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/888facd8a1af41b18e0448818d0e5e0d as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d 2024-11-14T09:57:39,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d, entries=7, sequenceid=11, filesize=12.2 K 2024-11-14T09:57:39,919 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=19.96 KB/20444 for 382381bfe8c6861e82c5e0cdf94eb053 in 42ms, sequenceid=11, compaction requested=false 2024-11-14T09:57:39,919 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:39,921 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:39,921 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 382381bfe8c6861e82c5e0cdf94eb053 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-14T09:57:39,925 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/3f424cdf00f24b99a1a8684b40b5bd48 is 1080, key is row0008/info:/1731578259877/Put/seqid=0 2024-11-14T09:57:39,929 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741838_1014 (size=26530) 2024-11-14T09:57:39,930 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741838_1014 (size=26530) 2024-11-14T09:57:39,931 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/3f424cdf00f24b99a1a8684b40b5bd48 2024-11-14T09:57:39,937 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/3f424cdf00f24b99a1a8684b40b5bd48 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48 2024-11-14T09:57:39,942 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48, entries=20, sequenceid=34, filesize=25.9 K 2024-11-14T09:57:39,943 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=5.25 KB/5380 for 382381bfe8c6861e82c5e0cdf94eb053 in 22ms, sequenceid=34, compaction requested=false 2024-11-14T09:57:39,943 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:39,943 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=38.1 K, sizeToCheck=16.0 K 2024-11-14T09:57:39,943 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:39,943 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48 because midkey is the same as first or last row 2024-11-14T09:57:40,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:40,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:41,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:41,419 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:41,937 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:41,937 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 382381bfe8c6861e82c5e0cdf94eb053 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:57:41,942 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/4e156c4801a84983829412591aca2db1 is 1080, key is row0028/info:/1731578259922/Put/seqid=0 2024-11-14T09:57:41,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741839_1015 (size=12509) 2024-11-14T09:57:41,966 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741839_1015 (size=12509) 2024-11-14T09:57:41,966 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=44 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/4e156c4801a84983829412591aca2db1 2024-11-14T09:57:41,972 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/4e156c4801a84983829412591aca2db1 as 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1 2024-11-14T09:57:41,978 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1, entries=7, sequenceid=44, filesize=12.2 K 2024-11-14T09:57:41,979 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 382381bfe8c6861e82c5e0cdf94eb053 in 42ms, sequenceid=44, compaction requested=true 2024-11-14T09:57:41,979 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:41,979 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=50.3 K, sizeToCheck=16.0 K 2024-11-14T09:57:41,979 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:41,979 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48 because midkey is the same as first or last row 2024-11-14T09:57:41,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 382381bfe8c6861e82c5e0cdf94eb053:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:57:41,980 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:41,980 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:57:41,981 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 51548 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:57:41,981 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 382381bfe8c6861e82c5e0cdf94eb053/info is initiating minor compaction (all files) 2024-11-14T09:57:41,981 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 382381bfe8c6861e82c5e0cdf94eb053/info in TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
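The flush records above show each new store file being written under the region's .tmp directory and then committed into the info store directory by a rename ("Committing ... .tmp/info/... as ... /info/..."). The snippet below is a simplified local-filesystem analogue of that write-then-rename commit, assuming only java.nio.file; the paths and file name are illustrative, not the actual HRegionFileSystem implementation.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

// Simplified local-filesystem analogue of the flush commit seen above:
// write the complete file under a temporary directory first, then move it
// into the live store directory in one rename so readers never see a partial file.
public class TmpThenCommit {
    public static void main(String[] args) throws IOException {
        Path storeDir = Files.createDirectories(Path.of("store", "info"));
        Path tmpDir = Files.createDirectories(Path.of("store", ".tmp"));

        // 1. Flush: write the finished file under .tmp/ (stand-in for the HFile writer).
        Path tmpFile = tmpDir.resolve("example-storefile");
        Files.write(tmpFile, "flushed-cells".getBytes(StandardCharsets.UTF_8));

        // 2. Commit: move it into info/; ATOMIC_MOVE makes the switch all-or-nothing
        //    where the underlying filesystem supports it.
        Path committed = storeDir.resolve(tmpFile.getFileName());
        Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

        System.out.println("committed " + committed + " (" + Files.size(committed) + " bytes)");
    }
}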
2024-11-14T09:57:41,981 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp, totalSize=50.3 K 2024-11-14T09:57:41,981 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:41,982 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 382381bfe8c6861e82c5e0cdf94eb053 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-14T09:57:41,982 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 888facd8a1af41b18e0448818d0e5e0d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1731578259862 2024-11-14T09:57:41,982 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f424cdf00f24b99a1a8684b40b5bd48, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1731578259877 2024-11-14T09:57:41,983 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4e156c4801a84983829412591aca2db1, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731578259922 2024-11-14T09:57:41,986 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/6c23c05f102548819100ec6a35ae9510 is 1080, key is row0035/info:/1731578261938/Put/seqid=0 2024-11-14T09:57:41,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741840_1016 (size=26530) 2024-11-14T09:57:41,997 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741840_1016 (size=26530) 2024-11-14T09:57:41,998 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=67 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/6c23c05f102548819100ec6a35ae9510 2024-11-14T09:57:42,005 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 382381bfe8c6861e82c5e0cdf94eb053#info#compaction#60 average throughput is 11.63 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:57:42,005 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/578e8e0de55040da99f0c3bbfc33a293 is 1080, key is row0001/info:/1731578259862/Put/seqid=0 2024-11-14T09:57:42,005 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-14T09:57:42,006 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/6c23c05f102548819100ec6a35ae9510 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510 2024-11-14T09:57:42,009 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] ipc.CallRunner(138): callId: 69 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53856 deadline: 1731578272005, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:42,013 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510, entries=20, sequenceid=67, filesize=25.9 K 2024-11-14T09:57:42,014 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=9.46 KB/9684 for 382381bfe8c6861e82c5e0cdf94eb053 in 33ms, sequenceid=67, compaction requested=false 2024-11-14T09:57:42,014 DEBUG 
[MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=76.2 K, sizeToCheck=16.0 K 2024-11-14T09:57:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:42,014 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48 because midkey is the same as first or last row 2024-11-14T09:57:42,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741841_1017 (size=41747) 2024-11-14T09:57:42,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741841_1017 (size=41747) 2024-11-14T09:57:42,025 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/578e8e0de55040da99f0c3bbfc33a293 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 2024-11-14T09:57:42,032 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 382381bfe8c6861e82c5e0cdf94eb053/info of 382381bfe8c6861e82c5e0cdf94eb053 into 578e8e0de55040da99f0c3bbfc33a293(size=40.8 K), total size for store is 66.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
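The split-policy DEBUG lines above amount to two checks: the summed store size is compared against a 16.0 K threshold, and the split is then rejected because the candidate midkey equals the first or last row of the largest file. The sketch below is a deliberately simplified, hypothetical rendering of those two checks; the names and sample values are illustrative, not the ConstantSizeRegionSplitPolicy or StoreUtils source.

import java.util.Arrays;

// Hypothetical, simplified sketch of the two checks visible in the DEBUG lines above:
// (1) is the store big enough to split, and (2) is the candidate midkey usable,
// i.e. strictly between the first and last row of the file being considered.
public class SplitCheckSketch {

    static boolean shouldSplit(long sumStoreSizeBytes, long sizeToCheckBytes) {
        // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
        return sumStoreSizeBytes > sizeToCheckBytes;
    }

    static boolean midkeyUsable(byte[] firstRow, byte[] lastRow, byte[] midkey) {
        // "cannot split ... because midkey is the same as first or last row"
        return !Arrays.equals(midkey, firstRow) && !Arrays.equals(midkey, lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 68300;           // roughly the 66.7 K reported above
        long sizeToCheck = 16 * 1024;   // the 16.0 K threshold from the log
        byte[] first = "row0001".getBytes();   // illustrative rows only
        byte[] last = "row9999".getBytes();
        byte[] midkey = "row0001".getBytes();  // midkey collapsed onto the first row

        System.out.println("size says split: " + shouldSplit(sumSize, sizeToCheck));
        System.out.println("midkey usable:   " + midkeyUsable(first, last, midkey));
    }
}

With these inputs the size check passes but the midkey check fails, which is exactly the "should split ... / cannot split ..." pair repeated in the log.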
2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:42,032 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., storeName=382381bfe8c6861e82c5e0cdf94eb053/info, priority=13, startTime=1731578261979; duration=0sec 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 because midkey is the same as first or last row 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 because midkey is the same as first or last row 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=66.7 K, sizeToCheck=16.0 K 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 because midkey is the same as first or last row 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:42,032 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 382381bfe8c6861e82c5e0cdf94eb053:info 2024-11-14T09:57:42,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2, 
error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:57:42,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:57:42,033 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 because the exception is null or not the one we care about 2024-11-14T09:57:42,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:42,420 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:43,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:43,421 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:44,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:44,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:45,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:45,422 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:46,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:46,423 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:47,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:47,424 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:48,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:48,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:49,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:49,425 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:50,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:50,426 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:51,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:51,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:52,114 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:52,114 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 382381bfe8c6861e82c5e0cdf94eb053 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-14T09:57:52,119 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/d460e8857330411fa2d34b44f9017988 is 1080, key is row0055/info:/1731578261983/Put/seqid=0 2024-11-14T09:57:52,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741842_1018 (size=15740) 2024-11-14T09:57:52,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741842_1018 (size=15740) 2024-11-14T09:57:52,131 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=81 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/d460e8857330411fa2d34b44f9017988 2024-11-14T09:57:52,137 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/d460e8857330411fa2d34b44f9017988 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988 2024-11-14T09:57:52,142 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988, entries=10, sequenceid=81, filesize=15.4 K 2024-11-14T09:57:52,143 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=0 B/0 for 382381bfe8c6861e82c5e0cdf94eb053 in 29ms, sequenceid=81, compaction requested=true 2024-11-14T09:57:52,143 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=82.0 K, sizeToCheck=16.0 K 2024-11-14T09:57:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:52,144 DEBUG [MemStoreFlusher.0 
{}] regionserver.StoreUtils(137): cannot split hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 because midkey is the same as first or last row 2024-11-14T09:57:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 382381bfe8c6861e82c5e0cdf94eb053:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:57:52,144 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:52,144 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:57:52,145 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 84017 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:57:52,145 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 382381bfe8c6861e82c5e0cdf94eb053/info is initiating minor compaction (all files) 2024-11-14T09:57:52,145 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 382381bfe8c6861e82c5e0cdf94eb053/info in TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:52,145 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp, totalSize=82.0 K 2024-11-14T09:57:52,146 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 578e8e0de55040da99f0c3bbfc33a293, keycount=34, bloomtype=ROW, size=40.8 K, encoding=NONE, compression=NONE, seqNum=44, earliestPutTs=1731578259862 2024-11-14T09:57:52,146 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6c23c05f102548819100ec6a35ae9510, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=67, earliestPutTs=1731578261938 2024-11-14T09:57:52,146 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting d460e8857330411fa2d34b44f9017988, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731578261983 2024-11-14T09:57:52,162 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 382381bfe8c6861e82c5e0cdf94eb053#info#compaction#62 average throughput is 21.89 MB/second, slept 0 
time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:57:52,162 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/2f8c56f58d1d48e1be9aca11804a8ffe is 1080, key is row0001/info:/1731578259862/Put/seqid=0 2024-11-14T09:57:52,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741843_1019 (size=74301) 2024-11-14T09:57:52,166 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741843_1019 (size=74301) 2024-11-14T09:57:52,172 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/.tmp/info/2f8c56f58d1d48e1be9aca11804a8ffe as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe 2024-11-14T09:57:52,178 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 382381bfe8c6861e82c5e0cdf94eb053/info of 382381bfe8c6861e82c5e0cdf94eb053 into 2f8c56f58d1d48e1be9aca11804a8ffe(size=72.6 K), total size for store is 72.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 382381bfe8c6861e82c5e0cdf94eb053: 2024-11-14T09:57:52,178 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., storeName=382381bfe8c6861e82c5e0cdf94eb053/info, priority=13, startTime=1731578272144; duration=0sec 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-14T09:57:52,178 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-14T09:57:52,179 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting 
TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:52,179 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:52,179 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 382381bfe8c6861e82c5e0cdf94eb053:info 2024-11-14T09:57:52,180 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46299 {}] assignment.AssignmentManager(1363): Split request from defc576eb6b7,34147,1731578248657, parent={ENCODED => 382381bfe8c6861e82c5e0cdf94eb053, NAME => 'TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-14T09:57:52,185 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46299 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:52,189 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=46299 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=382381bfe8c6861e82c5e0cdf94eb053, daughterA=0d7002df19b51f1b02d2741a1f5a7eb3, daughterB=1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:52,190 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=382381bfe8c6861e82c5e0cdf94eb053, daughterA=0d7002df19b51f1b02d2741a1f5a7eb3, daughterB=1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:52,190 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=382381bfe8c6861e82c5e0cdf94eb053, daughterA=0d7002df19b51f1b02d2741a1f5a7eb3, daughterB=1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:52,190 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=382381bfe8c6861e82c5e0cdf94eb053, daughterA=0d7002df19b51f1b02d2741a1f5a7eb3, daughterB=1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:52,197 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, UNASSIGN}] 2024-11-14T09:57:52,198 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, UNASSIGN 2024-11-14T09:57:52,200 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=382381bfe8c6861e82c5e0cdf94eb053, regionState=CLOSING, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:52,202 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake 
up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, UNASSIGN because future has completed 2024-11-14T09:57:52,202 DEBUG [PEWorker-3 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-14T09:57:52,203 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657}] 2024-11-14T09:57:52,361 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:52,361 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-14T09:57:52,361 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 382381bfe8c6861e82c5e0cdf94eb053, disabling compactions & flushes 2024-11-14T09:57:52,361 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:52,362 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 2024-11-14T09:57:52,362 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. after waiting 0 ms 2024-11-14T09:57:52,362 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
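The flush and compaction entries above culminate in a split decision driven by a plain size comparison: the store's total file size (sumSize=82.0 K, later 72.6 K after compaction) is checked against a threshold (sizeToCheck=16.0 K) before the split request with splitKey=row0062 is filed. Below is a minimal, self-contained Java sketch of that kind of check. It is not the actual ConstantSizeRegionSplitPolicy or IncreasingToUpperBoundRegionSplitPolicy source; the class name, method names, and the cubic growth of the threshold with regionsWithCommonTable are assumptions made only for illustration.

    // Hypothetical sketch of a size-based split check (names and threshold rule assumed, not HBase code).
    public final class SplitCheckSketch {
        /** True when the store has reached the split threshold, as in "Should split because region size is big enough". */
        static boolean shouldSplit(long storeSizeBytes, long sizeToCheckBytes) {
            return storeSizeBytes >= sizeToCheckBytes;
        }

        /** Assumed "increasing to upper bound" style threshold: grows with the number of
         *  regions of the table on this server, capped at the configured max file size. */
        static long sizeToCheck(long flushSizeBytes, long maxFileSizeBytes, int regionsWithCommonTable) {
            long grown = flushSizeBytes * (long) regionsWithCommonTable * regionsWithCommonTable * regionsWithCommonTable;
            return Math.min(maxFileSizeBytes, grown);
        }

        public static void main(String[] args) {
            // Numbers loosely taken from the log above: an ~82 KB store against a 16 KB check size.
            long sumSize = 82L * 1024;
            long check = 16L * 1024;
            System.out.println("should split: " + shouldSplit(sumSize, check)); // prints: should split: true
        }
    }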
2024-11-14T09:57:52,363 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988] to archive
2024-11-14T09:57:52,365 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(360): Archiving compacted files.
2024-11-14T09:57:52,368 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/888facd8a1af41b18e0448818d0e5e0d
2024-11-14T09:57:52,369 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/3f424cdf00f24b99a1a8684b40b5bd48
2024-11-14T09:57:52,371 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/578e8e0de55040da99f0c3bbfc33a293
2024-11-14T09:57:52,372 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/4e156c4801a84983829412591aca2db1
2024-11-14T09:57:52,373 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/6c23c05f102548819100ec6a35ae9510
2024-11-14T09:57:52,375 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/d460e8857330411fa2d34b44f9017988
2024-11-14T09:57:52,381 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=1
2024-11-14T09:57:52,382 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.
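The "Archived from FileableStoreFile" lines above move the compacted store files out of the region's data directory into an archive directory that mirrors the same table/region/family layout. A minimal sketch of that pattern follows, assuming local java.nio.file paths rather than HDFS; it is not org.apache.hadoop.hbase.backup.HFileArchiver, and all names and paths are invented for illustration.

    // Hypothetical sketch: relocate files under an archive root that mirrors the data layout.
    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;
    import java.util.List;

    public final class ArchiveSketch {
        /** Moves each file from dataRoot/<relative path> to archiveRoot/<same relative path>.
         *  Assumes every file passed in lives under dataRoot. */
        static void archiveCompactedFiles(Path dataRoot, Path archiveRoot, List<Path> files) throws IOException {
            for (Path file : files) {
                Path relative = dataRoot.relativize(file);    // e.g. default/Table/region/info/hfile
                Path target = archiveRoot.resolve(relative);  // same layout under the archive root
                if (target.getParent() != null) {
                    Files.createDirectories(target.getParent());
                }
                Files.move(file, target, StandardCopyOption.REPLACE_EXISTING);
            }
        }

        public static void main(String[] args) throws IOException {
            Path data = Files.createTempDirectory("data");
            Path archive = Files.createTempDirectory("archive");
            Path hfile = Files.createDirectories(data.resolve("default/TestTable/region/info")).resolve("hfile1");
            Files.writeString(hfile, "dummy");
            archiveCompactedFiles(data, archive, List.of(hfile));
            // The file is now only present under the archive root, in the mirrored subdirectory.
            System.out.println(Files.exists(archive.resolve("default/TestTable/region/info/hfile1"))); // true
        }
    }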
2024-11-14T09:57:52,382 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 382381bfe8c6861e82c5e0cdf94eb053: Waiting for close lock at 1731578272361Running coprocessor pre-close hooks at 1731578272361Disabling compacts and flushes for region at 1731578272361Disabling writes for close at 1731578272362 (+1 ms)Writing region close event to WAL at 1731578272377 (+15 ms)Running coprocessor post-close hooks at 1731578272382 (+5 ms)Closed at 1731578272382 2024-11-14T09:57:52,384 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:52,385 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=382381bfe8c6861e82c5e0cdf94eb053, regionState=CLOSED 2024-11-14T09:57:52,386 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 because future has completed 2024-11-14T09:57:52,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-14T09:57:52,390 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 382381bfe8c6861e82c5e0cdf94eb053, server=defc576eb6b7,34147,1731578248657 in 186 msec 2024-11-14T09:57:52,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-14T09:57:52,392 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=382381bfe8c6861e82c5e0cdf94eb053, UNASSIGN in 193 msec 2024-11-14T09:57:52,400 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:52,403 INFO [PEWorker-3 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 1 storefiles, region=382381bfe8c6861e82c5e0cdf94eb053, threads=1 2024-11-14T09:57:52,405 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe for region: 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741844_1020 (size=27) 2024-11-14T09:57:52,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741844_1020 (size=27) 2024-11-14T09:57:52,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:52,427 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741845_1021 (size=27) 2024-11-14T09:57:52,830 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741845_1021 (size=27) 2024-11-14T09:57:52,831 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe for region: 382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:57:52,833 DEBUG [PEWorker-3 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 382381bfe8c6861e82c5e0cdf94eb053 Daughter A: [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053] storefiles, Daughter B: [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053] storefiles. 
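The recurring "Failed invocation ... java.lang.reflect.InvocationTargetException ... Caused by: java.io.IOException: Filesystem closed" warnings in this section carry the signature of a reflective call: Method.invoke wraps whatever the target method throws in an InvocationTargetException, so the real error only appears as the cause. The following is a minimal, self-contained sketch of that wrapping, using an invented FakeFileSystem stand-in; it is not the RecoverLeaseFSUtils or DFSClient source.

    // Hypothetical sketch: a reflective call surfaces the real IOException only as the wrapped cause.
    import java.io.IOException;
    import java.lang.reflect.InvocationTargetException;
    import java.lang.reflect.Method;

    public class ReflectiveIsFileClosedSketch {
        /** Stand-in for a filesystem whose client has already been shut down. */
        static class FakeFileSystem {
            public boolean isFileClosed(String path) throws IOException {
                throw new IOException("Filesystem closed"); // mirrors the "Caused by" in the log above
            }
        }

        public static void main(String[] args) throws Exception {
            FakeFileSystem fs = new FakeFileSystem();
            Method m = FakeFileSystem.class.getMethod("isFileClosed", String.class);
            try {
                m.invoke(fs, "/some/wal/file"); // path is illustrative only
            } catch (InvocationTargetException e) {
                // Reflection wraps the thrown IOException, which is why the WARN entries show
                // InvocationTargetException with "Filesystem closed" as the cause.
                System.out.println("wrapped cause: " + e.getCause());
            }
        }
    }

Under that reading, the once-per-second repeats of the warning are a retry loop that keeps re-invoking the check while the underlying DFS client has already been closed.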
2024-11-14T09:57:52,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741846_1022 (size=71) 2024-11-14T09:57:52,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741846_1022 (size=71) 2024-11-14T09:57:52,843 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:52,854 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741847_1023 (size=71) 2024-11-14T09:57:52,855 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741847_1023 (size=71) 2024-11-14T09:57:52,857 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:52,866 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-14T09:57:52,868 DEBUG [PEWorker-3 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/recovered.edits/85.seqid, newMaxSeqId=85, maxSeqId=-1 2024-11-14T09:57:52,871 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1731578272870"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1731578272870"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1731578272870"}]},"ts":"1731578272870"} 2024-11-14T09:57:52,871 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731578272870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578272870"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731578272870"}]},"ts":"1731578272870"} 2024-11-14T09:57:52,871 DEBUG [PEWorker-3 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1731578272870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1731578272870"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1731578272870"}]},"ts":"1731578272870"} 2024-11-14T09:57:52,888 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d7002df19b51f1b02d2741a1f5a7eb3, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=1fd3d7545975d433190167fa08726b24, ASSIGN}] 2024-11-14T09:57:52,890 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d7002df19b51f1b02d2741a1f5a7eb3, ASSIGN 2024-11-14T09:57:52,890 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1fd3d7545975d433190167fa08726b24, ASSIGN 2024-11-14T09:57:52,891 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1fd3d7545975d433190167fa08726b24, ASSIGN; state=SPLITTING_NEW, location=defc576eb6b7,34147,1731578248657; forceNewPlan=false, retain=false 2024-11-14T09:57:52,891 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d7002df19b51f1b02d2741a1f5a7eb3, ASSIGN; state=SPLITTING_NEW, location=defc576eb6b7,34147,1731578248657; forceNewPlan=false, retain=false 2024-11-14T09:57:53,042 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0d7002df19b51f1b02d2741a1f5a7eb3, regionState=OPENING, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:53,042 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=1fd3d7545975d433190167fa08726b24, regionState=OPENING, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:53,045 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d7002df19b51f1b02d2741a1f5a7eb3, ASSIGN because future has completed 2024-11-14T09:57:53,046 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d7002df19b51f1b02d2741a1f5a7eb3, server=defc576eb6b7,34147,1731578248657}] 2024-11-14T09:57:53,046 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1fd3d7545975d433190167fa08726b24, ASSIGN because future has completed 2024-11-14T09:57:53,048 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fd3d7545975d433190167fa08726b24, server=defc576eb6b7,34147,1731578248657}] 2024-11-14T09:57:53,203 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 
2024-11-14T09:57:53,203 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 0d7002df19b51f1b02d2741a1f5a7eb3, NAME => 'TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-14T09:57:53,204 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,204 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:53,204 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,204 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,205 INFO [StoreOpener-0d7002df19b51f1b02d2741a1f5a7eb3-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,206 INFO [StoreOpener-0d7002df19b51f1b02d2741a1f5a7eb3-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0d7002df19b51f1b02d2741a1f5a7eb3 columnFamilyName info 2024-11-14T09:57:53,207 DEBUG [StoreOpener-0d7002df19b51f1b02d2741a1f5a7eb3-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:53,220 DEBUG [StoreOpener-0d7002df19b51f1b02d2741a1f5a7eb3-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-bottom 2024-11-14T09:57:53,220 INFO [StoreOpener-0d7002df19b51f1b02d2741a1f5a7eb3-1 {}] regionserver.HStore(327): Store=0d7002df19b51f1b02d2741a1f5a7eb3/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:53,220 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,221 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,222 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,223 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,223 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,225 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,227 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 0d7002df19b51f1b02d2741a1f5a7eb3; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800907, jitterRate=0.018406391143798828}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:57:53,227 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:57:53,228 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 0d7002df19b51f1b02d2741a1f5a7eb3: Running coprocessor pre-open hook at 1731578273204Writing region info on filesystem at 1731578273204Initializing all the Stores at 1731578273205 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578273205Cleaning up temporary data from old regions at 1731578273223 (+18 ms)Running coprocessor post-open hooks at 1731578273227 (+4 ms)Region opened successfully at 1731578273228 (+1 ms) 2024-11-14T09:57:53,229 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3., pid=12, masterSystemTime=1731578273199 2024-11-14T09:57:53,229 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
0d7002df19b51f1b02d2741a1f5a7eb3:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:57:53,229 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T09:57:53,229 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:53,230 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:57:53,230 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 0d7002df19b51f1b02d2741a1f5a7eb3/info is initiating minor compaction (all files) 2024-11-14T09:57:53,230 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0d7002df19b51f1b02d2741a1f5a7eb3/info in TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:57:53,230 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-bottom] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/.tmp, totalSize=72.6 K 2024-11-14T09:57:53,231 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=81, earliestPutTs=1731578259862 2024-11-14T09:57:53,232 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=0d7002df19b51f1b02d2741a1f5a7eb3, regionState=OPEN, openSeqNum=86, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:53,240 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:57:53,240 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:57:53,240 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 
2024-11-14T09:57:53,240 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 1fd3d7545975d433190167fa08726b24, NAME => 'TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-14T09:57:53,241 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,241 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:57:53,241 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,241 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,242 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-14T09:57:53,242 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-14T09:57:53,242 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-14T09:57:53,242 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0d7002df19b51f1b02d2741a1f5a7eb3, server=defc576eb6b7,34147,1731578248657 because future has completed 2024-11-14T09:57:53,242 INFO [StoreOpener-1fd3d7545975d433190167fa08726b24-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,243 INFO [StoreOpener-1fd3d7545975d433190167fa08726b24-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1fd3d7545975d433190167fa08726b24 columnFamilyName info 2024-11-14T09:57:53,243 DEBUG [StoreOpener-1fd3d7545975d433190167fa08726b24-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:57:53,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-14T09:57:53,247 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 0d7002df19b51f1b02d2741a1f5a7eb3, server=defc576eb6b7,34147,1731578248657 in 198 msec 2024-11-14T09:57:53,250 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=0d7002df19b51f1b02d2741a1f5a7eb3, ASSIGN in 359 msec 2024-11-14T09:57:53,251 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0d7002df19b51f1b02d2741a1f5a7eb3#info#compaction#63 average throughput is 31.30 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:57:53,252 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/.tmp/info/4b5106528809402a8688808f4b1487a9 is 1080, key is row0001/info:/1731578259862/Put/seqid=0 2024-11-14T09:57:53,259 DEBUG [StoreOpener-1fd3d7545975d433190167fa08726b24-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-top 2024-11-14T09:57:53,260 INFO [StoreOpener-1fd3d7545975d433190167fa08726b24-1 {}] regionserver.HStore(327): Store=1fd3d7545975d433190167fa08726b24/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:57:53,260 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,261 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/749c7ed6d524481f80fc5e825bbf0fc7 is 193, key is TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24./info:regioninfo/1731578273042/Put/seqid=0 2024-11-14T09:57:53,261 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,263 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,263 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,264 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,265 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,266 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 1fd3d7545975d433190167fa08726b24; next sequenceid=86; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=836379, jitterRate=0.06351098418235779}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-14T09:57:53,266 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:57:53,266 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 1fd3d7545975d433190167fa08726b24: Running coprocessor pre-open hook at 1731578273241Writing region info on filesystem at 1731578273241Initializing all the Stores at 1731578273242 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578273242Cleaning up temporary data from old regions at 1731578273264 (+22 ms)Running coprocessor post-open hooks at 1731578273266 (+2 ms)Region opened successfully at 1731578273266 2024-11-14T09:57:53,267 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., pid=13, masterSystemTime=1731578273199 2024-11-14T09:57:53,267 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 2 2024-11-14T09:57:53,267 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:53,267 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-14T09:57:53,268 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently 
split daughter region TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:57:53,268 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:57:53,268 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:57:53,268 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-top] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=72.6 K 2024-11-14T09:57:53,269 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] compactions.Compactor(225): Compacting 2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053, keycount=32, bloomtype=ROW, size=72.6 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731578259862 2024-11-14T09:57:53,269 DEBUG [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:57:53,269 INFO [RS_OPEN_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 
2024-11-14T09:57:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741848_1024 (size=70862) 2024-11-14T09:57:53,270 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741848_1024 (size=70862) 2024-11-14T09:57:53,271 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=1fd3d7545975d433190167fa08726b24, regionState=OPEN, openSeqNum=86, regionLocation=defc576eb6b7,34147,1731578248657 2024-11-14T09:57:53,273 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1fd3d7545975d433190167fa08726b24, server=defc576eb6b7,34147,1731578248657 because future has completed 2024-11-14T09:57:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741849_1025 (size=9847) 2024-11-14T09:57:53,276 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741849_1025 (size=9847) 2024-11-14T09:57:53,276 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/749c7ed6d524481f80fc5e825bbf0fc7 2024-11-14T09:57:53,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-14T09:57:53,278 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 1fd3d7545975d433190167fa08726b24, server=defc576eb6b7,34147,1731578248657 in 228 msec 2024-11-14T09:57:53,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-14T09:57:53,280 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=1fd3d7545975d433190167fa08726b24, ASSIGN in 390 msec 2024-11-14T09:57:53,282 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=382381bfe8c6861e82c5e0cdf94eb053, daughterA=0d7002df19b51f1b02d2741a1f5a7eb3, daughterB=1fd3d7545975d433190167fa08726b24 in 1.0950 sec 2024-11-14T09:57:53,285 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/.tmp/info/4b5106528809402a8688808f4b1487a9 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/4b5106528809402a8688808f4b1487a9 2024-11-14T09:57:53,291 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#65 average throughput is 3.08 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:57:53,291 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:57:53,292 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 0d7002df19b51f1b02d2741a1f5a7eb3/info of 0d7002df19b51f1b02d2741a1f5a7eb3 into 4b5106528809402a8688808f4b1487a9(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:57:53,292 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0d7002df19b51f1b02d2741a1f5a7eb3: 2024-11-14T09:57:53,292 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3., storeName=0d7002df19b51f1b02d2741a1f5a7eb3/info, priority=15, startTime=1731578273229; duration=0sec 2024-11-14T09:57:53,292 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:53,292 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0d7002df19b51f1b02d2741a1f5a7eb3:info 2024-11-14T09:57:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741850_1026 (size=8260) 2024-11-14T09:57:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741850_1026 (size=8260) 2024-11-14T09:57:53,310 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/ns/d863813495194c04847b91446d30ccd4 is 43, key is default/ns:d/1731578249718/Put/seqid=0 2024-11-14T09:57:53,313 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3 2024-11-14T09:57:53,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741851_1027 (size=5153) 2024-11-14T09:57:53,316 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741851_1027 (size=5153) 2024-11-14T09:57:53,316 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), 
to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/ns/d863813495194c04847b91446d30ccd4 2024-11-14T09:57:53,320 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into 1b9e7ab6dbc9453db9f9bc3bc185f4a3(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:57:53,320 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:57:53,320 INFO [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=15, startTime=1731578273267; duration=0sec 2024-11-14T09:57:53,320 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:57:53,320 DEBUG [RS:0;defc576eb6b7:34147-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:57:53,339 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/table/22e57141704143c0a0d389a404ab7ed0 is 65, key is TestLogRolling-testLogRolling/table:state/1731578250171/Put/seqid=0 2024-11-14T09:57:53,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741852_1028 (size=5340) 2024-11-14T09:57:53,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741852_1028 (size=5340) 2024-11-14T09:57:53,343 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/table/22e57141704143c0a0d389a404ab7ed0 2024-11-14T09:57:53,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/749c7ed6d524481f80fc5e825bbf0fc7 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/info/749c7ed6d524481f80fc5e825bbf0fc7 2024-11-14T09:57:53,354 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/info/749c7ed6d524481f80fc5e825bbf0fc7, entries=30, sequenceid=17, filesize=9.6 K 2024-11-14T09:57:53,355 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/ns/d863813495194c04847b91446d30ccd4 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/ns/d863813495194c04847b91446d30ccd4 
2024-11-14T09:57:53,361 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/ns/d863813495194c04847b91446d30ccd4, entries=2, sequenceid=17, filesize=5.0 K 2024-11-14T09:57:53,362 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/table/22e57141704143c0a0d389a404ab7ed0 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/table/22e57141704143c0a0d389a404ab7ed0 2024-11-14T09:57:53,368 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/table/22e57141704143c0a0d389a404ab7ed0, entries=2, sequenceid=17, filesize=5.2 K 2024-11-14T09:57:53,369 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 127ms, sequenceid=17, compaction requested=false 2024-11-14T09:57:53,369 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T09:57:53,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:53,428 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:54,116 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] ipc.CallRunner(138): callId: 71 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:53856 deadline: 1731578284115, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. is not online on defc576eb6b7,34147,1731578248657 2024-11-14T09:57:54,116 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. 
is not online on defc576eb6b7,34147,1731578248657 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:57:54,116 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053. is not online on defc576eb6b7,34147,1731578248657 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-14T09:57:54,117 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1731578249796.382381bfe8c6861e82c5e0cdf94eb053., hostname=defc576eb6b7,34147,1731578248657, seqNum=2 from cache 2024-11-14T09:57:54,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:54,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:55,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:55,429 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:56,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:56,430 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:57,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,383 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,384 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,410 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,413 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:57,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:57,920 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-14T09:57:57,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,923 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,953 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,954 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,959 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:57,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:57:58,350 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-14T09:57:58,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:57:58,431 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:59,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:57:59,432 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:00,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:00,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:01,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:01,433 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:02,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:02,434 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:03,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:03,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:04,193 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0065', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., hostname=defc576eb6b7,34147,1731578248657, seqNum=86] 2024-11-14T09:58:04,206 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:04,206 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:58:04,211 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da55c919413c4833b8a7eda93897426e is 1080, key is row0065/info:/1731578284194/Put/seqid=0 2024-11-14T09:58:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741853_1029 (size=12509) 2024-11-14T09:58:04,216 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741853_1029 (size=12509) 2024-11-14T09:58:04,217 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=96 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da55c919413c4833b8a7eda93897426e 2024-11-14T09:58:04,223 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da55c919413c4833b8a7eda93897426e as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e 2024-11-14T09:58:04,229 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e, entries=7, sequenceid=96, filesize=12.2 K 2024-11-14T09:58:04,230 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 1fd3d7545975d433190167fa08726b24 in 24ms, sequenceid=96, compaction requested=false 2024-11-14T09:58:04,230 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:04,230 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:04,231 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T09:58:04,235 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/d7e30271af9a4100a3dec0009713825a is 1080, key is row0072/info:/1731578284208/Put/seqid=0 2024-11-14T09:58:04,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741854_1030 (size=16817) 2024-11-14T09:58:04,241 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741854_1030 (size=16817) 2024-11-14T09:58:04,242 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=110 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/d7e30271af9a4100a3dec0009713825a 2024-11-14T09:58:04,248 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/d7e30271af9a4100a3dec0009713825a as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a 2024-11-14T09:58:04,257 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a, entries=11, sequenceid=110, filesize=16.4 K 2024-11-14T09:58:04,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=13.66 KB/13988 for 1fd3d7545975d433190167fa08726b24 in 27ms, sequenceid=110, compaction requested=true 2024-11-14T09:58:04,259 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:04,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 
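For context on the flush records above: the two flushes committed here cover rows row0065 through row0082 of the 'info' family (7 + 11 entries) in region 1fd3d7545975d433190167fa08726b24 of TestLogRolling-testLogRolling; once the memstore crosses its flush size, MemStoreFlusher writes it out as a new HFile under .tmp and commits it into info/. A minimal sketch of issuing such writes with the standard HBase client is shown below, assuming a running cluster and an existing table; the column qualifier and values are hypothetical placeholders, not taken from the log.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class LogRollingPuts {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Table table = connection.getTable(TableName.valueOf("TestLogRolling-testLogRolling"))) {
      // Rows row0065..row0082 mirror the row keys visible in the flush records above.
      for (int i = 65; i <= 82; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        // 'info' is the column family from the log; qualifier "q" and the value are made-up placeholders.
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        table.put(put);
      }
    }
  }
}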
2024-11-14T09:58:04,259 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:04,259 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:04,259 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:04,259 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=14.71 KB heapSize=16 KB 2024-11-14T09:58:04,260 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37586 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:04,260 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:04,260 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:04,260 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=36.7 K 2024-11-14T09:58:04,261 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1b9e7ab6dbc9453db9f9bc3bc185f4a3, keycount=3, bloomtype=ROW, size=8.1 K, encoding=NONE, compression=NONE, seqNum=82, earliestPutTs=1731578262000 2024-11-14T09:58:04,261 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting da55c919413c4833b8a7eda93897426e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=96, earliestPutTs=1731578284194 2024-11-14T09:58:04,261 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting d7e30271af9a4100a3dec0009713825a, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1731578284208 2024-11-14T09:58:04,263 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5c166967079d4c3bacede27dc59b0859 is 1080, key is row0083/info:/1731578284232/Put/seqid=0 
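For context on the compaction selection above: three store files (about 8.1 K, 12.2 K and 16.4 K, 37586 bytes in total) were picked for a minor compaction. The sketch below shows a simplified size-ratio test of the kind such ratio-based selection policies apply, namely that no candidate may be larger than the ratio times the combined size of the other candidates; it is not the actual HBase implementation, and 1.2 is only the commonly cited default of hbase.hstore.compaction.ratio, assumed here. The ~8.1 K size is inferred from the 37586-byte total minus the two block sizes logged earlier (12509 and 16817 bytes).

public class CompactionRatioCheck {
  // Simplified illustration: every file in the candidate set must be no larger
  // than ratio * (combined size of the other candidates). Not the actual HBase code.
  static boolean filesInRatio(long[] sizes, double ratio) {
    long total = 0;
    for (long s : sizes) {
      total += s;
    }
    for (long s : sizes) {
      if (s > ratio * (total - s)) {
        return false; // this file is too large relative to the rest of the selection
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Store file sizes taken from the log: ~8.1 K (inferred as 8260 B), 12509 B, 16817 B; total 37586 B.
    long[] candidate = {8260L, 12509L, 16817L};
    double ratio = 1.2; // assumed default of hbase.hstore.compaction.ratio
    System.out.println("in ratio: " + filesInRatio(candidate, ratio)); // prints "in ratio: true"
  }
}

With these sizes every file passes the check, which is consistent with the log: all three files were selected and later compacted into a single ~27.1 K file.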
2024-11-14T09:58:04,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741855_1031 (size=20064) 2024-11-14T09:58:04,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741855_1031 (size=20064) 2024-11-14T09:58:04,269 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=14.71 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5c166967079d4c3bacede27dc59b0859 2024-11-14T09:58:04,272 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#71 average throughput is 21.55 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:04,272 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5550c2561c9443958f31a5ac16c2f0ad is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:04,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5c166967079d4c3bacede27dc59b0859 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859 2024-11-14T09:58:04,281 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859, entries=14, sequenceid=127, filesize=19.6 K 2024-11-14T09:58:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741856_1032 (size=27778) 2024-11-14T09:58:04,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741856_1032 (size=27778) 2024-11-14T09:58:04,282 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~14.71 KB/15064, heapSize ~15.98 KB/16368, currentSize=0 B/0 for 1fd3d7545975d433190167fa08726b24 in 23ms, sequenceid=127, compaction requested=false 2024-11-14T09:58:04,283 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:04,289 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5550c2561c9443958f31a5ac16c2f0ad as 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5550c2561c9443958f31a5ac16c2f0ad 2024-11-14T09:58:04,295 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into 5550c2561c9443958f31a5ac16c2f0ad(size=27.1 K), total size for store is 46.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:04,295 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:04,295 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578284259; duration=0sec 2024-11-14T09:58:04,295 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:04,296 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:04,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:04,435 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:05,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:05,436 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:06,273 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:06,273 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:58:06,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5a6cd6821dd04c5295e2acc42a242768 is 1080, key is row0097/info:/1731578286261/Put/seqid=0 2024-11-14T09:58:06,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741857_1033 (size=12516) 2024-11-14T09:58:06,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741857_1033 (size=12516) 2024-11-14T09:58:06,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=138 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5a6cd6821dd04c5295e2acc42a242768 2024-11-14T09:58:06,293 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/5a6cd6821dd04c5295e2acc42a242768 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768 2024-11-14T09:58:06,299 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768, entries=7, sequenceid=138, filesize=12.2 K 2024-11-14T09:58:06,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=11.56 KB/11836 for 1fd3d7545975d433190167fa08726b24 in 27ms, sequenceid=138, compaction requested=true 2024-11-14T09:58:06,300 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:06,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:06,300 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; 
compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:06,300 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:06,301 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 60358 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:06,301 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:06,302 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:06,302 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5550c2561c9443958f31a5ac16c2f0ad, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=58.9 K 2024-11-14T09:58:06,302 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:06,302 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T09:58:06,302 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5550c2561c9443958f31a5ac16c2f0ad, keycount=21, bloomtype=ROW, size=27.1 K, encoding=NONE, compression=NONE, seqNum=110, earliestPutTs=1731578262000 2024-11-14T09:58:06,303 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5c166967079d4c3bacede27dc59b0859, keycount=14, bloomtype=ROW, size=19.6 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1731578284232 2024-11-14T09:58:06,303 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a6cd6821dd04c5295e2acc42a242768, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731578286261 2024-11-14T09:58:06,306 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da3eb2b44cd24a4daa1a839e391b7302 is 1080, key is row0104/info:/1731578286275/Put/seqid=0 2024-11-14T09:58:06,315 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to 
blk_1073741858_1034 (size=17906) 2024-11-14T09:58:06,317 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=153 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da3eb2b44cd24a4daa1a839e391b7302 2024-11-14T09:58:06,317 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#74 average throughput is 43.10 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:06,318 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b7a6e1c7d29e418abafbb3f710dcae02 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:06,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741858_1034 (size=17906) 2024-11-14T09:58:06,324 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/da3eb2b44cd24a4daa1a839e391b7302 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302 2024-11-14T09:58:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741859_1035 (size=50540) 2024-11-14T09:58:06,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741859_1035 (size=50540) 2024-11-14T09:58:06,330 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302, entries=12, sequenceid=153, filesize=17.5 K 2024-11-14T09:58:06,331 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=10.51 KB/10760 for 1fd3d7545975d433190167fa08726b24 in 29ms, sequenceid=153, compaction requested=false 2024-11-14T09:58:06,331 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:06,332 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:06,332 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-14T09:58:06,334 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b7a6e1c7d29e418abafbb3f710dcae02 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b7a6e1c7d29e418abafbb3f710dcae02 2024-11-14T09:58:06,338 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b535bb8abde44deba95d80110084f3d6 is 1080, key is row0116/info:/1731578286304/Put/seqid=0 2024-11-14T09:58:06,342 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into b7a6e1c7d29e418abafbb3f710dcae02(size=49.4 K), total size for store is 66.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:06,342 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:06,342 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578286300; duration=0sec 2024-11-14T09:58:06,342 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:06,342 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:06,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741860_1036 (size=16828) 2024-11-14T09:58:06,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741860_1036 (size=16828) 2024-11-14T09:58:06,344 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=167 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b535bb8abde44deba95d80110084f3d6 2024-11-14T09:58:06,349 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b535bb8abde44deba95d80110084f3d6 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6 2024-11-14T09:58:06,355 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6, entries=11, sequenceid=167, filesize=16.4 K 2024-11-14T09:58:06,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 1fd3d7545975d433190167fa08726b24 in 24ms, sequenceid=167, compaction requested=true 2024-11-14T09:58:06,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:06,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:06,356 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:06,356 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:06,357 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 85274 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:06,357 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:06,357 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 
2024-11-14T09:58:06,357 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b7a6e1c7d29e418abafbb3f710dcae02, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=83.3 K 2024-11-14T09:58:06,358 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting b7a6e1c7d29e418abafbb3f710dcae02, keycount=42, bloomtype=ROW, size=49.4 K, encoding=NONE, compression=NONE, seqNum=138, earliestPutTs=1731578262000 2024-11-14T09:58:06,358 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting da3eb2b44cd24a4daa1a839e391b7302, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=153, earliestPutTs=1731578286275 2024-11-14T09:58:06,359 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting b535bb8abde44deba95d80110084f3d6, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731578286304 2024-11-14T09:58:06,369 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#76 average throughput is 33.35 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:06,369 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/75e98a2db3bc413e973c19aad263bc22 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741861_1037 (size=75577) 2024-11-14T09:58:06,373 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741861_1037 (size=75577) 2024-11-14T09:58:06,380 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/75e98a2db3bc413e973c19aad263bc22 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/75e98a2db3bc413e973c19aad263bc22 2024-11-14T09:58:06,386 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into 75e98a2db3bc413e973c19aad263bc22(size=73.8 K), total size for store is 73.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:06,386 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:06,386 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578286356; duration=0sec 2024-11-14T09:58:06,386 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:06,386 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:06,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:06,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:07,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:07,437 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:08,349 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:08,349 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:58:08,373 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/659cdcda45e34f71b1cf636da96c2bde is 1080, key is row0127/info:/1731578286334/Put/seqid=0 2024-11-14T09:58:08,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741862_1038 (size=12516) 2024-11-14T09:58:08,394 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741862_1038 (size=12516) 2024-11-14T09:58:08,394 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=179 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/659cdcda45e34f71b1cf636da96c2bde 2024-11-14T09:58:08,401 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/659cdcda45e34f71b1cf636da96c2bde as 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde 2024-11-14T09:58:08,406 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde, entries=7, sequenceid=179, filesize=12.2 K 2024-11-14T09:58:08,407 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for 1fd3d7545975d433190167fa08726b24 in 58ms, sequenceid=179, compaction requested=false 2024-11-14T09:58:08,407 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:08,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:08,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-14T09:58:08,413 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/504e3e80708c41618e0f80baf4b19086 is 1080, key is row0134/info:/1731578288351/Put/seqid=0 2024-11-14T09:58:08,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741863_1039 (size=24394) 2024-11-14T09:58:08,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741863_1039 (size=24394) 2024-11-14T09:58:08,418 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=200 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/504e3e80708c41618e0f80baf4b19086 2024-11-14T09:58:08,424 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/504e3e80708c41618e0f80baf4b19086 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086 2024-11-14T09:58:08,429 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086, entries=18, sequenceid=200, filesize=23.8 K 2024-11-14T09:58:08,430 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=9.46 KB/9684 for 1fd3d7545975d433190167fa08726b24 in 21ms, sequenceid=200, compaction requested=true 2024-11-14T09:58:08,430 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:08,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:08,430 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:08,431 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:08,432 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 112487 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:08,432 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:08,432 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:08,432 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/75e98a2db3bc413e973c19aad263bc22, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=109.9 K 2024-11-14T09:58:08,432 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 75e98a2db3bc413e973c19aad263bc22, keycount=65, bloomtype=ROW, size=73.8 K, encoding=NONE, compression=NONE, seqNum=167, earliestPutTs=1731578262000 2024-11-14T09:58:08,433 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 659cdcda45e34f71b1cf636da96c2bde, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=179, earliestPutTs=1731578286334 2024-11-14T09:58:08,433 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 504e3e80708c41618e0f80baf4b19086, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1731578288351 2024-11-14T09:58:08,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:08,438 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:08,443 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#79 average throughput is 46.18 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:08,444 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/fac81d96a84943e08e2f515f7a163e66 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:08,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741864_1040 (size=102633) 2024-11-14T09:58:08,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741864_1040 (size=102633) 2024-11-14T09:58:08,452 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/fac81d96a84943e08e2f515f7a163e66 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/fac81d96a84943e08e2f515f7a163e66 2024-11-14T09:58:08,458 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into fac81d96a84943e08e2f515f7a163e66(size=100.2 K), total size for store is 100.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:58:08,459 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:08,459 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578288430; duration=0sec 2024-11-14T09:58:08,459 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:08,459 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:09,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:09,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:09,735 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-14T09:58:09,735 INFO [master/defc576eb6b7:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-14T09:58:10,432 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:10,433 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-14T09:58:10,438 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/20ac740182e54bb9a34d44e6f3f1c16f is 1080, key is row0152/info:/1731578288410/Put/seqid=0 2024-11-14T09:58:10,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:10,439 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741865_1041 (size=15750) 2024-11-14T09:58:10,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741865_1041 (size=15750) 2024-11-14T09:58:10,444 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=214 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/20ac740182e54bb9a34d44e6f3f1c16f 2024-11-14T09:58:10,450 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/20ac740182e54bb9a34d44e6f3f1c16f as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f 2024-11-14T09:58:10,456 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f, entries=10, sequenceid=214, filesize=15.4 K 2024-11-14T09:58:10,457 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=11.56 KB/11836 for 
1fd3d7545975d433190167fa08726b24 in 25ms, sequenceid=214, compaction requested=false 2024-11-14T09:58:10,457 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:10,458 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:10,458 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-14T09:58:10,462 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/4ec7cccc014543a59d85e89a75ce1dc6 is 1080, key is row0162/info:/1731578290435/Put/seqid=0 2024-11-14T09:58:10,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741866_1042 (size=17906) 2024-11-14T09:58:10,478 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741866_1042 (size=17906) 2024-11-14T09:58:10,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=229 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/4ec7cccc014543a59d85e89a75ce1dc6 2024-11-14T09:58:10,483 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/4ec7cccc014543a59d85e89a75ce1dc6 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6 2024-11-14T09:58:10,488 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6, entries=12, sequenceid=229, filesize=17.5 K 2024-11-14T09:58:10,489 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=15.76 KB/16140 for 1fd3d7545975d433190167fa08726b24 in 31ms, sequenceid=229, compaction requested=true 2024-11-14T09:58:10,489 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:10,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:10,489 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:10,489 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 
2024-11-14T09:58:10,490 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 136289 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:10,491 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:10,491 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:10,491 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/fac81d96a84943e08e2f515f7a163e66, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=133.1 K 2024-11-14T09:58:10,491 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:10,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB 2024-11-14T09:58:10,491 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting fac81d96a84943e08e2f515f7a163e66, keycount=90, bloomtype=ROW, size=100.2 K, encoding=NONE, compression=NONE, seqNum=200, earliestPutTs=1731578262000 2024-11-14T09:58:10,492 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 20ac740182e54bb9a34d44e6f3f1c16f, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=214, earliestPutTs=1731578288410 2024-11-14T09:58:10,492 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4ec7cccc014543a59d85e89a75ce1dc6, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731578290435 2024-11-14T09:58:10,496 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a56af53acfed46aea562fcd25f727b19 is 1080, key is row0174/info:/1731578290459/Put/seqid=0 2024-11-14T09:58:10,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741867_1043 (size=23316) 2024-11-14T09:58:10,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741867_1043 (size=23316) 2024-11-14T09:58:10,504 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a56af53acfed46aea562fcd25f727b19 2024-11-14T09:58:10,508 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#83 average throughput is 38.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:10,509 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b26cd64b6c2c434aa4242111eeeeb718 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:10,511 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a56af53acfed46aea562fcd25f727b19 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19 2024-11-14T09:58:10,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741868_1044 (size=126587) 2024-11-14T09:58:10,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741868_1044 (size=126587) 2024-11-14T09:58:10,517 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19, entries=17, sequenceid=249, filesize=22.8 K 2024-11-14T09:58:10,518 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=2.10 KB/2152 for 1fd3d7545975d433190167fa08726b24 in 27ms, sequenceid=249, compaction requested=false 2024-11-14T09:58:10,518 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:10,520 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/b26cd64b6c2c434aa4242111eeeeb718 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b26cd64b6c2c434aa4242111eeeeb718 2024-11-14T09:58:10,526 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into b26cd64b6c2c434aa4242111eeeeb718(size=123.6 K), total size for store is 146.4 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-14T09:58:10,526 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:10,526 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578290489; duration=0sec 2024-11-14T09:58:10,526 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:10,526 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:11,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:11,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:12,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:12,440 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:12,507 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:12,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:58:12,511 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/3beab327300a4725a094fe966dbc0be8 is 1080, key is row0191/info:/1731578290492/Put/seqid=0 2024-11-14T09:58:12,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741869_1045 (size=12520) 2024-11-14T09:58:12,519 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741869_1045 (size=12520) 2024-11-14T09:58:12,519 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=260 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/3beab327300a4725a094fe966dbc0be8 2024-11-14T09:58:12,525 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/3beab327300a4725a094fe966dbc0be8 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8 2024-11-14T09:58:12,531 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8, entries=7, sequenceid=260, filesize=12.2 K 2024-11-14T09:58:12,531 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 1fd3d7545975d433190167fa08726b24 in 24ms, sequenceid=260, compaction requested=true 2024-11-14T09:58:12,531 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:12,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:12,532 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:12,532 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:12,533 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 162423 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-14T09:58:12,533 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:12,533 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:12,533 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b26cd64b6c2c434aa4242111eeeeb718, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=158.6 K 2024-11-14T09:58:12,533 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:12,534 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting b26cd64b6c2c434aa4242111eeeeb718, keycount=112, bloomtype=ROW, size=123.6 K, encoding=NONE, compression=NONE, seqNum=229, earliestPutTs=1731578262000 2024-11-14T09:58:12,534 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T09:58:12,534 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting a56af53acfed46aea562fcd25f727b19, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1731578290459 2024-11-14T09:58:12,535 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3beab327300a4725a094fe966dbc0be8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1731578290492 2024-11-14T09:58:12,538 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/019593a2b3364bf69b894f31cbeaf19a is 1080, key is row0198/info:/1731578292508/Put/seqid=0 2024-11-14T09:58:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741870_1046 (size=19013) 2024-11-14T09:58:12,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741870_1046 (size=19013) 2024-11-14T09:58:12,547 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=276 (bloomFilter=true), 
to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/019593a2b3364bf69b894f31cbeaf19a 2024-11-14T09:58:12,548 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#86 average throughput is 69.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:12,549 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/df8a38ed43e94bc6a7f4ee57926e0ee0 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:12,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741871_1047 (size=152638) 2024-11-14T09:58:12,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741871_1047 (size=152638) 2024-11-14T09:58:12,556 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/019593a2b3364bf69b894f31cbeaf19a as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a 2024-11-14T09:58:12,559 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/df8a38ed43e94bc6a7f4ee57926e0ee0 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/df8a38ed43e94bc6a7f4ee57926e0ee0 2024-11-14T09:58:12,561 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a, entries=13, sequenceid=276, filesize=18.6 K 2024-11-14T09:58:12,562 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=12.61 KB/12912 for 1fd3d7545975d433190167fa08726b24 in 29ms, sequenceid=276, compaction requested=false 2024-11-14T09:58:12,562 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:12,563 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:12,563 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-14T09:58:12,567 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) 
in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into df8a38ed43e94bc6a7f4ee57926e0ee0(size=149.1 K), total size for store is 167.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:12,567 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:12,567 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578292532; duration=0sec 2024-11-14T09:58:12,567 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:12,567 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:12,567 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/81c9e4359bf64d98b4e9dc46307cb1c8 is 1080, key is row0211/info:/1731578292535/Put/seqid=0 2024-11-14T09:58:12,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741872_1048 (size=19013) 2024-11-14T09:58:12,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741872_1048 (size=19013) 2024-11-14T09:58:12,572 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=292 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/81c9e4359bf64d98b4e9dc46307cb1c8 2024-11-14T09:58:12,578 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/81c9e4359bf64d98b4e9dc46307cb1c8 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8 2024-11-14T09:58:12,584 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8, entries=13, sequenceid=292, filesize=18.6 K 2024-11-14T09:58:12,585 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=1.05 KB/1076 for 1fd3d7545975d433190167fa08726b24 in 22ms, sequenceid=292, compaction requested=true 2024-11-14T09:58:12,585 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:12,585 DEBUG [MemStoreFlusher.0 {}] 
regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:12,585 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:12,585 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:12,586 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 190664 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:12,586 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:12,586 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:12,587 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/df8a38ed43e94bc6a7f4ee57926e0ee0, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=186.2 K 2024-11-14T09:58:12,587 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting df8a38ed43e94bc6a7f4ee57926e0ee0, keycount=136, bloomtype=ROW, size=149.1 K, encoding=NONE, compression=NONE, seqNum=260, earliestPutTs=1731578262000 2024-11-14T09:58:12,587 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 019593a2b3364bf69b894f31cbeaf19a, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=276, earliestPutTs=1731578292508 2024-11-14T09:58:12,588 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 81c9e4359bf64d98b4e9dc46307cb1c8, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731578292535 2024-11-14T09:58:12,600 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#88 average throughput is 41.56 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:12,600 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/1454aaebd16641dbb77f07d08e3fd33b is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:12,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741873_1049 (size=180802) 2024-11-14T09:58:12,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741873_1049 (size=180802) 2024-11-14T09:58:12,608 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/1454aaebd16641dbb77f07d08e3fd33b as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1454aaebd16641dbb77f07d08e3fd33b 2024-11-14T09:58:12,614 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into 1454aaebd16641dbb77f07d08e3fd33b(size=176.6 K), total size for store is 176.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:12,614 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:12,614 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578292585; duration=0sec 2024-11-14T09:58:12,614 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:12,614 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:13,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:13,441 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:14,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:14,442 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:14,577 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:14,577 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-14T09:58:14,581 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/8d9b88692f4a40fb8cead48a1f05ec1a is 1080, key is row0224/info:/1731578292565/Put/seqid=0 2024-11-14T09:58:14,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741874_1050 (size=12523) 2024-11-14T09:58:14,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741874_1050 (size=12523) 2024-11-14T09:58:14,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=304 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/8d9b88692f4a40fb8cead48a1f05ec1a 2024-11-14T09:58:14,608 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/8d9b88692f4a40fb8cead48a1f05ec1a as 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a 2024-11-14T09:58:14,614 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a, entries=7, sequenceid=304, filesize=12.2 K 2024-11-14T09:58:14,614 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 1fd3d7545975d433190167fa08726b24 in 37ms, sequenceid=304, compaction requested=false 2024-11-14T09:58:14,615 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:14,616 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34147 {}] regionserver.HRegion(8855): Flush requested on 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:14,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=21.02 KB heapSize=22.75 KB 2024-11-14T09:58:14,620 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a7896a403fa64c7495f86ff5610bc9e2 is 1080, key is row0231/info:/1731578294578/Put/seqid=0 2024-11-14T09:58:14,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741875_1051 (size=26570) 2024-11-14T09:58:14,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741875_1051 (size=26570) 2024-11-14T09:58:14,627 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=21.02 KB at sequenceid=327 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a7896a403fa64c7495f86ff5610bc9e2 2024-11-14T09:58:14,632 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/a7896a403fa64c7495f86ff5610bc9e2 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2 2024-11-14T09:58:14,637 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2, entries=20, sequenceid=327, filesize=25.9 K 2024-11-14T09:58:14,638 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~21.02 KB/21520, heapSize ~22.73 KB/23280, currentSize=6.30 KB/6456 for 1fd3d7545975d433190167fa08726b24 in 22ms, sequenceid=327, compaction requested=true 2024-11-14T09:58:14,638 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:14,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 1fd3d7545975d433190167fa08726b24:info, priority=-2147483648, current under compaction store size is 1 2024-11-14T09:58:14,638 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:14,638 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-14T09:58:14,639 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 219895 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-14T09:58:14,639 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1541): 1fd3d7545975d433190167fa08726b24/info is initiating minor compaction (all files) 2024-11-14T09:58:14,640 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 1fd3d7545975d433190167fa08726b24/info in TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:14,640 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1454aaebd16641dbb77f07d08e3fd33b, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2] into tmpdir=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp, totalSize=214.7 K 2024-11-14T09:58:14,640 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 1454aaebd16641dbb77f07d08e3fd33b, keycount=162, bloomtype=ROW, size=176.6 K, encoding=NONE, compression=NONE, seqNum=292, earliestPutTs=1731578262000 2024-11-14T09:58:14,641 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8d9b88692f4a40fb8cead48a1f05ec1a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=304, earliestPutTs=1731578292565 2024-11-14T09:58:14,641 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] compactions.Compactor(225): Compacting a7896a403fa64c7495f86ff5610bc9e2, keycount=20, bloomtype=ROW, size=25.9 K, encoding=NONE, compression=NONE, seqNum=327, earliestPutTs=1731578294578 2024-11-14T09:58:14,649 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-14T09:58:14,654 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 1fd3d7545975d433190167fa08726b24#info#compaction#91 average throughput is 38.79 MB/second, slept 0 time(s) 
and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-14T09:58:14,655 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/899b7155fd454b73a2ec21efd977dba1 is 1080, key is row0062/info:/1731578262000/Put/seqid=0 2024-11-14T09:58:14,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741876_1052 (size=210114) 2024-11-14T09:58:14,656 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741876_1052 (size=210114) 2024-11-14T09:58:14,662 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/899b7155fd454b73a2ec21efd977dba1 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/899b7155fd454b73a2ec21efd977dba1 2024-11-14T09:58:14,667 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 1fd3d7545975d433190167fa08726b24/info of 1fd3d7545975d433190167fa08726b24 into 899b7155fd454b73a2ec21efd977dba1(size=205.2 K), total size for store is 205.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-14T09:58:14,667 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:14,667 INFO [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., storeName=1fd3d7545975d433190167fa08726b24/info, priority=13, startTime=1731578294638; duration=0sec 2024-11-14T09:58:14,668 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-14T09:58:14,668 DEBUG [RS:0;defc576eb6b7:34147-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 1fd3d7545975d433190167fa08726b24:info 2024-11-14T09:58:15,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:15,443 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:16,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:16,444 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:16,628 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-14T09:58:16,629 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C34147%2C1731578248657.1731578296629 2024-11-14T09:58:16,635 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,635 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,635 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,635 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,635 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,635 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578249289 with entries=314, filesize=308.87 KB; new WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578296629 2024-11-14T09:58:16,636 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39729:39729),(127.0.0.1/127.0.0.1:33689:33689)] 2024-11-14T09:58:16,636 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578249289 is not closed yet, will try archiving it next time 2024-11-14T09:58:16,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741833_1009 (size=316286) 2024-11-14T09:58:16,639 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741833_1009 (size=316286) 2024-11-14T09:58:16,640 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1fd3d7545975d433190167fa08726b24 1/1 column families, dataSize=6.30 KB heapSize=7 KB 2024-11-14T09:58:16,644 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/275d32d78e424c6b8b36f00a7fc663ab is 1080, key is row0251/info:/1731578294617/Put/seqid=0 2024-11-14T09:58:16,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741878_1054 (size=11436) 2024-11-14T09:58:16,650 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741878_1054 (size=11436) 2024-11-14T09:58:16,651 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=6.30 KB at sequenceid=337 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/275d32d78e424c6b8b36f00a7fc663ab 2024-11-14T09:58:16,656 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/.tmp/info/275d32d78e424c6b8b36f00a7fc663ab as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/275d32d78e424c6b8b36f00a7fc663ab 2024-11-14T09:58:16,661 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/275d32d78e424c6b8b36f00a7fc663ab, entries=6, sequenceid=337, filesize=11.2 K 2024-11-14T09:58:16,662 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~6.30 KB/6456, heapSize ~6.98 KB/7152, currentSize=0 B/0 for 1fd3d7545975d433190167fa08726b24 in 22ms, sequenceid=337, compaction requested=false 2024-11-14T09:58:16,662 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1fd3d7545975d433190167fa08726b24: 2024-11-14T09:58:16,662 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 0d7002df19b51f1b02d2741a1f5a7eb3: 2024-11-14T09:58:16,662 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-14T09:58:16,670 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/4e0515509be74a4587f9deb58d735bb3 is 193, key is TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24./info:regioninfo/1731578273270/Put/seqid=0 2024-11-14T09:58:16,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741879_1055 (size=6223) 2024-11-14T09:58:16,682 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741879_1055 (size=6223) 2024-11-14T09:58:16,682 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/4e0515509be74a4587f9deb58d735bb3 2024-11-14T09:58:16,687 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/.tmp/info/4e0515509be74a4587f9deb58d735bb3 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/info/4e0515509be74a4587f9deb58d735bb3 2024-11-14T09:58:16,692 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/info/4e0515509be74a4587f9deb58d735bb3, entries=5, sequenceid=21, filesize=6.1 K 2024-11-14T09:58:16,693 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 31ms, sequenceid=21, compaction requested=false 2024-11-14T09:58:16,693 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-14T09:58:16,694 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C34147%2C1731578248657.1731578296694 2024-11-14T09:58:16,700 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,701 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,701 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,701 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,701 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,701 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578296629 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578296694 2024-11-14T09:58:16,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39729:39729),(127.0.0.1/127.0.0.1:33689:33689)] 2024-11-14T09:58:16,703 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578296629 is not closed yet, will try archiving it next time 2024-11-14T09:58:16,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741877_1053 (size=731) 2024-11-14T09:58:16,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741877_1053 (size=731) 2024-11-14T09:58:16,703 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578249289 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs/defc576eb6b7%2C34147%2C1731578248657.1731578249289 2024-11-14T09:58:16,704 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-14T09:58:16,704 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:58:16,704 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:58:16,705 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:16,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:16,705 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:16,705 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:58:16,705 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=605387780, stopped=false 2024-11-14T09:58:16,705 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-14T09:58:16,705 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,46299,1731578248370 2024-11-14T09:58:16,705 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/WALs/defc576eb6b7,34147,1731578248657/defc576eb6b7%2C34147%2C1731578248657.1731578296629 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs/defc576eb6b7%2C34147%2C1731578248657.1731578296629 2024-11-14T09:58:16,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:16,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:16,777 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:58:16,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:16,777 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:16,777 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-14T09:58:16,777 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:16,777 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:16,777 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,34147,1731578248657' ***** 2024-11-14T09:58:16,777 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:58:16,778 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(3091): Received CLOSE for 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(3091): Received CLOSE for 0d7002df19b51f1b02d2741a1f5a7eb3 2024-11-14T09:58:16,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,34147,1731578248657 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:58:16,778 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:34147. 
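The JUnit frames captured in the call stacks above (AbstractTestLogRolling.tearDown -> HBaseTestingUtil.shutdownMiniCluster -> shutdownMiniHBaseCluster -> AsyncConnectionImpl.close) are the standard mini-cluster teardown path. A minimal sketch of that teardown, assuming only the HBaseTestingUtil and JUnit 4 APIs that the traces themselves reference (the class and method bodies below are illustrative, not the actual test code):

// Illustrative sketch only: the teardown path implied by the stack traces above,
// using the HBaseTestingUtil / JUnit 4 APIs those traces reference.
import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;

public class LogRollingTeardownSketch {
  // Shared mini-cluster helper, as referenced by AbstractTestLogRolling in the trace.
  protected final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Brings up the mini DFS, ZooKeeper, master and region server used by the test.
    TEST_UTIL.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Closes the shared async connection, asks the master to shut the cluster down,
    // and stops the region servers -- the sequence logged around this point.
    TEST_UTIL.shutdownMiniCluster();
  }
}

The STOPPING banner above and the region CLOSE and store-file archiving entries that follow are all driven by that single shutdownMiniCluster() call.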
2024-11-14T09:58:16,778 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 1fd3d7545975d433190167fa08726b24, disabling compactions & flushes 2024-11-14T09:58:16,778 DEBUG [RS:0;defc576eb6b7:34147 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:16,778 DEBUG [RS:0;defc576eb6b7:34147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:16,778 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:16,778 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:16,778 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. after waiting 0 ms 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:58:16,778 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:58:16,778 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
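The store-closer entries that follow move every compacted-away HFile from the region's data directory to the matching location under archive/ before the region finishes closing; the StoppedRpcClientException further down appears to be a side effect of the connection having just been closed during shutdown (see the "Stopping rpc client" entries above) rather than an archiving failure. The data -> archive path mapping is visible directly in the log lines; a small helper expressing the same layout is sketched below (hypothetical, for illustration only -- not an HBase API):

// Hypothetical helper, illustration only: mirrors the data -> archive path mapping
// seen in the HFileArchiver entries below. Not an HBase API.
import org.apache.hadoop.fs.Path;

public final class ArchivePathSketch {
  private ArchivePathSketch() {}

  // e.g. <root>/data/default/TestLogRolling-testLogRolling/<region>/info/<hfile>
  //  ->  <root>/archive/data/default/TestLogRolling-testLogRolling/<region>/info/<hfile>
  // Assumes storeFile lives under rootDir, as in the entries below.
  public static Path toArchivePath(Path rootDir, Path storeFile) {
    // Compute the store file's path relative to the cluster root directory, then
    // re-anchor it under <root>/archive, keeping table/region/family/file intact.
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }
}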
2024-11-14T09:58:16,779 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:58:16,779 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-14T09:58:16,779 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1325): Online Regions={1fd3d7545975d433190167fa08726b24=TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24., 0d7002df19b51f1b02d2741a1f5a7eb3=TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3., 1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:58:16,779 DEBUG [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1351): Waiting on 0d7002df19b51f1b02d2741a1f5a7eb3, 1588230740, 1fd3d7545975d433190167fa08726b24 2024-11-14T09:58:16,779 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:58:16,779 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:58:16,779 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:58:16,779 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:58:16,779 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:58:16,779 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-top, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5550c2561c9443958f31a5ac16c2f0ad, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b7a6e1c7d29e418abafbb3f710dcae02, 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/75e98a2db3bc413e973c19aad263bc22, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/fac81d96a84943e08e2f515f7a163e66, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b26cd64b6c2c434aa4242111eeeeb718, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/df8a38ed43e94bc6a7f4ee57926e0ee0, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1454aaebd16641dbb77f07d08e3fd33b, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a, 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2] to archive 2024-11-14T09:58:16,781 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:58:16,790 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:58:16,790 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-14T09:58:16,791 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:58:16,791 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:58:16,791 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578296779Running coprocessor pre-close hooks at 1731578296779Disabling compacts and flushes for region at 1731578296779Disabling writes for close at 1731578296779Writing region close event to WAL at 1731578296780 (+1 ms)Running coprocessor post-close hooks at 1731578296791 (+11 ms)Closed at 1731578296791 2024-11-14T09:58:16,791 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:58:16,791 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1b9e7ab6dbc9453db9f9bc3bc185f4a3 2024-11-14T09:58:16,793 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e to 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da55c919413c4833b8a7eda93897426e 2024-11-14T09:58:16,794 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5550c2561c9443958f31a5ac16c2f0ad to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5550c2561c9443958f31a5ac16c2f0ad 2024-11-14T09:58:16,795 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/d7e30271af9a4100a3dec0009713825a 2024-11-14T09:58:16,796 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5c166967079d4c3bacede27dc59b0859 2024-11-14T09:58:16,798 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b7a6e1c7d29e418abafbb3f710dcae02 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b7a6e1c7d29e418abafbb3f710dcae02 2024-11-14T09:58:16,799 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/5a6cd6821dd04c5295e2acc42a242768 2024-11-14T09:58:16,800 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/da3eb2b44cd24a4daa1a839e391b7302 2024-11-14T09:58:16,801 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/75e98a2db3bc413e973c19aad263bc22 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/75e98a2db3bc413e973c19aad263bc22 2024-11-14T09:58:16,802 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b535bb8abde44deba95d80110084f3d6 2024-11-14T09:58:16,803 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/659cdcda45e34f71b1cf636da96c2bde 2024-11-14T09:58:16,804 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/fac81d96a84943e08e2f515f7a163e66 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/fac81d96a84943e08e2f515f7a163e66 2024-11-14T09:58:16,806 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/504e3e80708c41618e0f80baf4b19086 2024-11-14T09:58:16,807 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/20ac740182e54bb9a34d44e6f3f1c16f 2024-11-14T09:58:16,808 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b26cd64b6c2c434aa4242111eeeeb718 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/b26cd64b6c2c434aa4242111eeeeb718 2024-11-14T09:58:16,810 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/4ec7cccc014543a59d85e89a75ce1dc6 2024-11-14T09:58:16,811 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a56af53acfed46aea562fcd25f727b19 2024-11-14T09:58:16,812 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/df8a38ed43e94bc6a7f4ee57926e0ee0 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/df8a38ed43e94bc6a7f4ee57926e0ee0 2024-11-14T09:58:16,812 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/3beab327300a4725a094fe966dbc0be8 2024-11-14T09:58:16,814 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/019593a2b3364bf69b894f31cbeaf19a 2024-11-14T09:58:16,814 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1454aaebd16641dbb77f07d08e3fd33b to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/1454aaebd16641dbb77f07d08e3fd33b 2024-11-14T09:58:16,815 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/81c9e4359bf64d98b4e9dc46307cb1c8 2024-11-14T09:58:16,816 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/8d9b88692f4a40fb8cead48a1f05ec1a 2024-11-14T09:58:16,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/info/a7896a403fa64c7495f86ff5610bc9e2 2024-11-14T09:58:16,818 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=defc576eb6b7:46299 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] 
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ...
16 more 2024-11-14T09:58:16,819 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [1b9e7ab6dbc9453db9f9bc3bc185f4a3=8260, da55c919413c4833b8a7eda93897426e=12509, 5550c2561c9443958f31a5ac16c2f0ad=27778, d7e30271af9a4100a3dec0009713825a=16817, 5c166967079d4c3bacede27dc59b0859=20064, b7a6e1c7d29e418abafbb3f710dcae02=50540, 5a6cd6821dd04c5295e2acc42a242768=12516, da3eb2b44cd24a4daa1a839e391b7302=17906, 75e98a2db3bc413e973c19aad263bc22=75577, b535bb8abde44deba95d80110084f3d6=16828, 659cdcda45e34f71b1cf636da96c2bde=12516, fac81d96a84943e08e2f515f7a163e66=102633, 504e3e80708c41618e0f80baf4b19086=24394, 20ac740182e54bb9a34d44e6f3f1c16f=15750, b26cd64b6c2c434aa4242111eeeeb718=126587, 4ec7cccc014543a59d85e89a75ce1dc6=17906, a56af53acfed46aea562fcd25f727b19=23316, df8a38ed43e94bc6a7f4ee57926e0ee0=152638, 3beab327300a4725a094fe966dbc0be8=12520, 019593a2b3364bf69b894f31cbeaf19a=19013, 1454aaebd16641dbb77f07d08e3fd33b=180802, 81c9e4359bf64d98b4e9dc46307cb1c8=19013, 8d9b88692f4a40fb8cead48a1f05ec1a=12523, a7896a403fa64c7495f86ff5610bc9e2=26570] 2024-11-14T09:58:16,822 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/1fd3d7545975d433190167fa08726b24/recovered.edits/340.seqid, newMaxSeqId=340, maxSeqId=85 2024-11-14T09:58:16,823 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 1fd3d7545975d433190167fa08726b24: Waiting for close lock at 1731578296778Running coprocessor pre-close hooks at 1731578296778Disabling compacts and flushes for region at 1731578296778Disabling writes for close at 1731578296778Writing region close event to WAL at 1731578296819 (+41 ms)Running coprocessor post-close hooks at 1731578296823 (+4 ms)Closed at 1731578296823 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1731578272185.1fd3d7545975d433190167fa08726b24. 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0d7002df19b51f1b02d2741a1f5a7eb3, disabling compactions & flushes 2024-11-14T09:58:16,823 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 
after waiting 0 ms 2024-11-14T09:58:16,823 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:58:16,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053->hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/382381bfe8c6861e82c5e0cdf94eb053/info/2f8c56f58d1d48e1be9aca11804a8ffe-bottom] to archive 2024-11-14T09:58:16,824 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-14T09:58:16,826 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053 to hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/archive/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/info/2f8c56f58d1d48e1be9aca11804a8ffe.382381bfe8c6861e82c5e0cdf94eb053 2024-11-14T09:58:16,826 WARN [StoreCloser-TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-14T09:58:16,829 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/data/default/TestLogRolling-testLogRolling/0d7002df19b51f1b02d2741a1f5a7eb3/recovered.edits/90.seqid, newMaxSeqId=90, maxSeqId=85 2024-11-14T09:58:16,830 INFO [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:58:16,830 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0d7002df19b51f1b02d2741a1f5a7eb3: Waiting for close lock at 1731578296823Running coprocessor pre-close hooks at 1731578296823Disabling compacts and flushes for region at 1731578296823Disabling writes for close at 1731578296823Writing region close event to WAL at 1731578296826 (+3 ms)Running coprocessor post-close hooks at 1731578296830 (+4 ms)Closed at 1731578296830 2024-11-14T09:58:16,830 DEBUG [RS_CLOSE_REGION-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1731578272185.0d7002df19b51f1b02d2741a1f5a7eb3. 2024-11-14T09:58:16,979 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,34147,1731578248657; all regions closed. 
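The HFileArchiver entries above show the store-close path for a region: every compacted store file is moved out of the table's data directory and into the same relative location under the archive root, so the namespace/table/region/family layout is preserved. Below is a minimal sketch of that path mapping using plain string handling; toArchivePath and the EXAMPLE-* path components are placeholders for illustration, not HBase's actual HFileArchiver API (the real code resolves paths against the configured root directory rather than searching for a substring).

public class ArchivePathSketch {
  // Hypothetical helper: re-root a store file path from .../data/... to .../archive/data/...
  static String toArchivePath(String storeFilePath) {
    int idx = storeFilePath.indexOf("/data/");
    if (idx < 0) {
      throw new IllegalArgumentException("not under a data directory: " + storeFilePath);
    }
    return storeFilePath.substring(0, idx) + "/archive/data/"
        + storeFilePath.substring(idx + "/data/".length());
  }

  public static void main(String[] args) {
    String src = "hdfs://localhost:41179/user/jenkins/test-data/EXAMPLE-ROOT"
        + "/data/default/TestLogRolling-testLogRolling/EXAMPLE-REGION/info/EXAMPLE-FILE";
    // Prints the mirrored location under .../EXAMPLE-ROOT/archive/data/...
    System.out.println(toArchivePath(src));
  }
}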
2024-11-14T09:58:16,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,979 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741834_1010 (size=8107) 2024-11-14T09:58:16,984 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741834_1010 (size=8107) 2024-11-14T09:58:16,986 DEBUG [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs 2024-11-14T09:58:16,986 INFO [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C34147%2C1731578248657.meta:.meta(num 1731578249624) 2024-11-14T09:58:16,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,987 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:16,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741880_1056 (size=778) 2024-11-14T09:58:16,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741880_1056 (size=778) 2024-11-14T09:58:16,991 DEBUG [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/oldWALs 2024-11-14T09:58:16,991 INFO [RS:0;defc576eb6b7:34147 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C34147%2C1731578248657:(num 1731578296694) 2024-11-14T09:58:16,991 DEBUG [RS:0;defc576eb6b7:34147 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:16,991 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:58:16,991 INFO [RS:0;defc576eb6b7:34147 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:58:16,991 INFO [RS:0;defc576eb6b7:34147 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:58:16,991 INFO [RS:0;defc576eb6b7:34147 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:58:16,992 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
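Right above, each closed write-ahead log is not deleted in place: the file is renamed from the server's WAL directory into the shared oldWALs directory ("Moved 1 WAL file(s) to .../oldWALs"), where cleaner chores can later delete or replicate it. A minimal sketch of that move, assuming hadoop-common on the classpath and using placeholder paths under the local temp directory rather than the test cluster's HDFS layout:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MoveClosedWalSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());    // local FS unless configured otherwise

    Path base = new Path(System.getProperty("java.io.tmpdir"), "wal-archive-sketch");
    Path walDir = new Path(base, "WALs/example-server");
    Path oldWals = new Path(base, "oldWALs");

    fs.mkdirs(walDir);
    Path closedWal = new Path(walDir, "example-wal.0000000001");
    fs.create(closedWal).close();                            // stand-in for a closed WAL file
    fs.mkdirs(oldWals);

    // The "archive" step is just a rename into oldWALs, keeping the file name.
    boolean moved = fs.rename(closedWal, new Path(oldWals, closedWal.getName()));
    System.out.println(moved ? "moved to " + oldWals : "rename failed");
  }
}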
2024-11-14T09:58:16,992 INFO [RS:0;defc576eb6b7:34147 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34147 2024-11-14T09:58:16,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:58:16,998 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,34147,1731578248657 2024-11-14T09:58:16,998 INFO [RS:0;defc576eb6b7:34147 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:58:16,998 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,34147,1731578248657] 2024-11-14T09:58:17,019 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,34147,1731578248657 already deleted, retry=false 2024-11-14T09:58:17,019 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,34147,1731578248657 expired; onlineServers=0 2024-11-14T09:58:17,019 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,46299,1731578248370' ***** 2024-11-14T09:58:17,019 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:58:17,019 INFO [M:0;defc576eb6b7:46299 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:58:17,019 INFO [M:0;defc576eb6b7:46299 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:58:17,019 DEBUG [M:0;defc576eb6b7:46299 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:58:17,019 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
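The /hbase/rs events above illustrate how server liveness is tracked: each region server holds an ephemeral znode, and when its ZooKeeper session ends the node disappears, which the master's tracker treats as expiration. A stand-alone sketch of that watch pattern with the plain ZooKeeper client follows, assuming an ensemble is reachable at the placeholder connect string; the znode path below is illustrative, not the test cluster's actual node.

import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralExpirationWatchSketch {
  public static void main(String[] args) throws Exception {
    String rsZnode = "/hbase/rs/example-server,34147,0000000000000";
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> { });

    // One-shot watch: fires when the node is created, deleted, or its data changes.
    zk.exists(rsZnode, (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted) {
        System.out.println("ephemeral node deleted, treating server as expired: " + event.getPath());
      }
    });

    Thread.sleep(60_000);   // keep the JVM alive long enough to observe the event
    zk.close();
  }
}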
2024-11-14T09:58:17,019 DEBUG [M:0;defc576eb6b7:46299 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:58:17,019 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578249009 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578249009,5,FailOnTimeoutGroup] 2024-11-14T09:58:17,019 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578249009 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578249009,5,FailOnTimeoutGroup] 2024-11-14T09:58:17,019 INFO [M:0;defc576eb6b7:46299 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:58:17,019 INFO [M:0;defc576eb6b7:46299 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:58:17,019 DEBUG [M:0;defc576eb6b7:46299 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:58:17,020 INFO [M:0;defc576eb6b7:46299 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:58:17,020 INFO [M:0;defc576eb6b7:46299 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:58:17,020 INFO [M:0;defc576eb6b7:46299 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:58:17,020 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-14T09:58:17,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:58:17,029 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:17,029 DEBUG [M:0;defc576eb6b7:46299 {}] zookeeper.ZKUtil(347): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:58:17,029 WARN [M:0;defc576eb6b7:46299 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:58:17,030 INFO [M:0;defc576eb6b7:46299 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/.lastflushedseqids 2024-11-14T09:58:17,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741881_1057 (size=228) 2024-11-14T09:58:17,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741881_1057 (size=228) 2024-11-14T09:58:17,036 INFO [M:0;defc576eb6b7:46299 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:58:17,036 INFO [M:0;defc576eb6b7:46299 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:58:17,036 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:58:17,036 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:17,036 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:17,036 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:58:17,036 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:17,036 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.42 KB heapSize=63.35 KB 2024-11-14T09:58:17,052 DEBUG [M:0;defc576eb6b7:46299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/679bf29c89cc4227a61051e0da29650e is 82, key is hbase:meta,,1/info:regioninfo/1731578249664/Put/seqid=0 2024-11-14T09:58:17,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741882_1058 (size=5672) 2024-11-14T09:58:17,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741882_1058 (size=5672) 2024-11-14T09:58:17,057 INFO [M:0;defc576eb6b7:46299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/679bf29c89cc4227a61051e0da29650e 2024-11-14T09:58:17,078 DEBUG [M:0;defc576eb6b7:46299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b8015b1eb4843b299471620d75ddfe4 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1731578250176/Put/seqid=0 2024-11-14T09:58:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741883_1059 (size=7090) 2024-11-14T09:58:17,083 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741883_1059 (size=7090) 2024-11-14T09:58:17,084 INFO [M:0;defc576eb6b7:46299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.81 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b8015b1eb4843b299471620d75ddfe4 2024-11-14T09:58:17,088 INFO [M:0;defc576eb6b7:46299 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b8015b1eb4843b299471620d75ddfe4 2024-11-14T09:58:17,103 DEBUG [M:0;defc576eb6b7:46299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e55b288ee0974c08b509230aa6925bb2 is 69, key is defc576eb6b7,34147,1731578248657/rs:state/1731578249132/Put/seqid=0 2024-11-14T09:58:17,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741884_1060 (size=5156) 2024-11-14T09:58:17,107 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741884_1060 (size=5156) 2024-11-14T09:58:17,108 INFO [M:0;defc576eb6b7:46299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e55b288ee0974c08b509230aa6925bb2 2024-11-14T09:58:17,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:17,108 INFO [RS:0;defc576eb6b7:34147 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:58:17,108 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34147-0x10138c7384b0001, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:17,108 INFO [RS:0;defc576eb6b7:34147 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,34147,1731578248657; zookeeper connection closed. 2024-11-14T09:58:17,109 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@657f3ec8 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@657f3ec8 2024-11-14T09:58:17,109 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:58:17,128 DEBUG [M:0;defc576eb6b7:46299 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f3c4eccebd3497ba28365eb1e987510 is 52, key is load_balancer_on/state:d/1731578249792/Put/seqid=0 2024-11-14T09:58:17,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741885_1061 (size=5056) 2024-11-14T09:58:17,150 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741885_1061 (size=5056) 2024-11-14T09:58:17,151 INFO [M:0;defc576eb6b7:46299 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f3c4eccebd3497ba28365eb1e987510 2024-11-14T09:58:17,156 INFO [regionserver/defc576eb6b7:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:58:17,156 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/679bf29c89cc4227a61051e0da29650e as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/679bf29c89cc4227a61051e0da29650e 2024-11-14T09:58:17,162 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/679bf29c89cc4227a61051e0da29650e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-14T09:58:17,163 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/0b8015b1eb4843b299471620d75ddfe4 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b8015b1eb4843b299471620d75ddfe4 2024-11-14T09:58:17,168 INFO [M:0;defc576eb6b7:46299 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 0b8015b1eb4843b299471620d75ddfe4 2024-11-14T09:58:17,168 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/0b8015b1eb4843b299471620d75ddfe4, entries=13, sequenceid=125, filesize=6.9 K 2024-11-14T09:58:17,169 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e55b288ee0974c08b509230aa6925bb2 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e55b288ee0974c08b509230aa6925bb2 2024-11-14T09:58:17,175 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e55b288ee0974c08b509230aa6925bb2, entries=1, sequenceid=125, filesize=5.0 K 2024-11-14T09:58:17,177 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5f3c4eccebd3497ba28365eb1e987510 as hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5f3c4eccebd3497ba28365eb1e987510 2024-11-14T09:58:17,182 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:41179/user/jenkins/test-data/f48a891e-745f-785b-293d-ef83630035fe/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5f3c4eccebd3497ba28365eb1e987510, entries=1, sequenceid=125, filesize=4.9 K 2024-11-14T09:58:17,183 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, 
sequenceid=125, compaction requested=false 2024-11-14T09:58:17,185 INFO [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:17,185 DEBUG [M:0;defc576eb6b7:46299 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578297036Disabling compacts and flushes for region at 1731578297036Disabling writes for close at 1731578297036Obtaining lock to block concurrent updates at 1731578297036Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578297036Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52651, getHeapSize=64808, getOffHeapSize=0, getCellsCount=148 at 1731578297037 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1731578297037Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578297037Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578297052 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578297052Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578297061 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578297078 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578297078Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578297089 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578297102 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578297102Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578297112 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578297127 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578297127Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@50992631: reopening flushed file at 1731578297156 (+29 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c745dc: reopening flushed file at 1731578297162 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@40c59987: reopening flushed file at 1731578297168 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4262794d: reopening flushed file at 1731578297176 (+8 ms)Finished flush of dataSize ~51.42 KB/52651, heapSize ~63.29 KB/64808, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 147ms, sequenceid=125, compaction requested=false at 1731578297183 (+7 ms)Writing region close event to WAL at 1731578297185 (+2 ms)Closed at 1731578297185 2024-11-14T09:58:17,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:17,186 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:17,186 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:17,186 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:17,186 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43971 is added to blk_1073741830_1006 (size=61320) 2024-11-14T09:58:17,188 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34273 is added to blk_1073741830_1006 (size=61320) 2024-11-14T09:58:17,189 INFO 
[master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-14T09:58:17,189 INFO [M:0;defc576eb6b7:46299 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:58:17,189 INFO [M:0;defc576eb6b7:46299 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:46299 2024-11-14T09:58:17,189 INFO [M:0;defc576eb6b7:46299 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:58:17,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:17,298 INFO [M:0;defc576eb6b7:46299 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:58:17,298 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:46299-0x10138c7384b0000, quorum=127.0.0.1:55186, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:17,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7f9d8b97{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:17,301 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1435a214{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:17,301 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:17,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3af484fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:17,301 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5aa0bab9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:17,305 WARN [BP-1487981552-172.17.0.2-1731578245760 heartbeating to localhost/127.0.0.1:41179 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:58:17,305 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
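A few entries back, the flush of the master's local store shows the usual two-step commit: each column family's snapshot is first written to a file under the region's .tmp directory and only then renamed into the family directory ("Committing .tmp/info/... as .../info/..."), so readers never observe a half-written store file. A generic sketch of that write-to-temp-then-rename pattern on the local filesystem (names and contents are placeholders; the real flush goes through HDFS and HBase's store file writers):

import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;

public class TmpThenCommitSketch {
  public static void main(String[] args) throws Exception {
    Path regionDir = Files.createTempDirectory("region");
    Path tmpDir = Files.createDirectories(regionDir.resolve(".tmp"));

    // Step 1: flush the snapshot to a temporary file nobody reads from.
    Path tmpFile = tmpDir.resolve("example-storefile");
    Files.write(tmpFile, "flushed cells".getBytes(StandardCharsets.UTF_8));

    // Step 2: commit by moving it into the family directory in a single rename.
    Path familyDir = Files.createDirectories(regionDir.resolve("info"));
    Path committed = familyDir.resolve(tmpFile.getFileName());
    Files.move(tmpFile, committed, StandardCopyOption.ATOMIC_MOVE);

    System.out.println("committed " + committed);
  }
}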
2024-11-14T09:58:17,305 WARN [BP-1487981552-172.17.0.2-1731578245760 heartbeating to localhost/127.0.0.1:41179 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1487981552-172.17.0.2-1731578245760 (Datanode Uuid 82059d17-3cdc-4403-b90b-51732aa1ecdc) service to localhost/127.0.0.1:41179 2024-11-14T09:58:17,305 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:58:17,305 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data3/current/BP-1487981552-172.17.0.2-1731578245760 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:17,305 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data4/current/BP-1487981552-172.17.0.2-1731578245760 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:17,306 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:58:17,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6901ebc1{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:17,310 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4d7def5f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:17,310 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:17,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79974a7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:17,310 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4438ca54{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:17,312 WARN [BP-1487981552-172.17.0.2-1731578245760 heartbeating to localhost/127.0.0.1:41179 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:58:17,312 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:58:17,312 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:58:17,312 WARN [BP-1487981552-172.17.0.2-1731578245760 heartbeating to localhost/127.0.0.1:41179 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1487981552-172.17.0.2-1731578245760 (Datanode Uuid 6dd64b2f-023d-4e82-8672-1c9526c8ffd9) service to localhost/127.0.0.1:41179 2024-11-14T09:58:17,312 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data1/current/BP-1487981552-172.17.0.2-1731578245760 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:17,313 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/cluster_7e65e3e5-cee2-4ee5-b0d3-373d2fe815c4/data/data2/current/BP-1487981552-172.17.0.2-1731578245760 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:17,313 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:58:17,318 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7a75c30e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:58:17,319 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1a0adda6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:17,319 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:17,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a2bf3b7{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:17,319 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34e3d9e2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:17,331 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:58:17,362 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:58:17,374 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=229 (was 206) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41179 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41179 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41179 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-39-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-38-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-39-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:41179
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-14-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:41179
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-38-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41179 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-40-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-41-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:41179 from jenkins.hfs.6
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-38-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:41179
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-40-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-39-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-41-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

- Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK?
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=230 (was 242), ProcessCount=11 (was 11), AvailableMemoryMB=3471 (was 3606)
2024-11-14T09:58:17,382 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=229, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=230, ProcessCount=11, AvailableMemoryMB=3471
2024-11-14T09:58:17,382 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false}
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.log.dir so I do NOT create it in target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/11775727-22fe-5463-0847-a25fbeef4a3b/hadoop.tmp.dir so I do NOT create it in target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9, deleteOnExit=true
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/test.cache.data in system properties and HBase conf
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.tmp.dir in system properties and HBase conf
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir in system properties and HBase conf
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/mapreduce.cluster.local.dir in system properties and HBase conf
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/mapreduce.cluster.temp.dir in system properties and HBase conf
2024-11-14T09:58:17,383 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF
2024-11-14T09:58:17,384 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a
DistributedFileSystem. Skipping on block location reordering 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/nfs.dump.dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/java.io.tmpdir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-14T09:58:17,384 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-14T09:58:17,385 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-14T09:58:17,399 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:58:17,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:17,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:17,765 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:58:17,770 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:58:17,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:58:17,772 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:58:17,772 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:58:17,777 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:58:17,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@426614f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:58:17,777 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@233bbfea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:58:17,886 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2b790cdd{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/java.io.tmpdir/jetty-localhost-45795-hadoop-hdfs-3_4_1-tests_jar-_-any-14217431296545641686/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:58:17,886 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1bd8a44a{HTTP/1.1, (http/1.1)}{localhost:45795} 2024-11-14T09:58:17,886 INFO [Time-limited test {}] server.Server(415): Started @302521ms 2024-11-14T09:58:17,899 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-14T09:58:18,117 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:58:18,119 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:58:18,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:58:18,119 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:58:18,119 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-14T09:58:18,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5e513ee6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:58:18,120 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1db48135{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:58:18,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@54697ec6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/java.io.tmpdir/jetty-localhost-36519-hadoop-hdfs-3_4_1-tests_jar-_-any-14362464514900489375/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:18,212 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4b4f311b{HTTP/1.1, (http/1.1)}{localhost:36519} 2024-11-14T09:58:18,212 INFO [Time-limited test {}] server.Server(415): Started @302847ms 2024-11-14T09:58:18,213 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:58:18,235 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-14T09:58:18,237 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-14T09:58:18,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-14T09:58:18,238 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-14T09:58:18,238 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-14T09:58:18,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20f48718{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,AVAILABLE} 2024-11-14T09:58:18,238 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@78e9eb81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-14T09:58:18,333 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@420a76f4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/java.io.tmpdir/jetty-localhost-33573-hadoop-hdfs-3_4_1-tests_jar-_-any-3878165811527977611/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:18,334 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5f6d8bd9{HTTP/1.1, (http/1.1)}{localhost:33573} 2024-11-14T09:58:18,334 INFO [Time-limited test {}] server.Server(415): Started @302969ms 2024-11-14T09:58:18,334 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-14T09:58:18,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:18,445 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-14T09:58:19,318 WARN [Thread-2510 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data1/current/BP-1345401396-172.17.0.2-1731578297402/current, will proceed with Du for space computation calculation, 2024-11-14T09:58:19,318 WARN [Thread-2511 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data2/current/BP-1345401396-172.17.0.2-1731578297402/current, will proceed with Du for space computation calculation, 2024-11-14T09:58:19,338 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:58:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ca259cb64c2b471 with lease ID 0x6eeedfbe0260b9ba: Processing first storage report for DS-716e6fea-2e80-48a2-887e-9dee2a3351fb from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=45ea8ec0-a953-474c-9765-fd60a101b1ae, infoPort=42429, infoSecurePort=0, ipcPort=35835, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402) 2024-11-14T09:58:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ca259cb64c2b471 with lease ID 0x6eeedfbe0260b9ba: from storage DS-716e6fea-2e80-48a2-887e-9dee2a3351fb node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=45ea8ec0-a953-474c-9765-fd60a101b1ae, infoPort=42429, infoSecurePort=0, ipcPort=35835, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:58:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5ca259cb64c2b471 with lease ID 0x6eeedfbe0260b9ba: Processing first storage report for DS-952d98db-8e7c-4773-9e81-e695cd13065e from datanode DatanodeRegistration(127.0.0.1:34635, datanodeUuid=45ea8ec0-a953-474c-9765-fd60a101b1ae, infoPort=42429, infoSecurePort=0, ipcPort=35835, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402) 2024-11-14T09:58:19,340 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ca259cb64c2b471 with lease ID 0x6eeedfbe0260b9ba: from storage DS-952d98db-8e7c-4773-9e81-e695cd13065e node DatanodeRegistration(127.0.0.1:34635, datanodeUuid=45ea8ec0-a953-474c-9765-fd60a101b1ae, infoPort=42429, infoSecurePort=0, ipcPort=35835, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:58:19,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:19,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:19,469 WARN [Thread-2521 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data3/current/BP-1345401396-172.17.0.2-1731578297402/current, will proceed with Du for space computation calculation, 2024-11-14T09:58:19,469 WARN [Thread-2522 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data4/current/BP-1345401396-172.17.0.2-1731578297402/current, will proceed with Du for space computation calculation, 2024-11-14T09:58:19,486 WARN [Thread-2497 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-14T09:58:19,488 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe06754ee3c10f09a with lease ID 0x6eeedfbe0260b9bb: Processing first storage report for DS-eb5212a4-db54-4568-a228-7d25187ad9f8 from datanode DatanodeRegistration(127.0.0.1:42421, datanodeUuid=72b57d5a-040e-4220-b1ff-281373022be5, infoPort=41753, infoSecurePort=0, ipcPort=36813, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402) 2024-11-14T09:58:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe06754ee3c10f09a with lease ID 0x6eeedfbe0260b9bb: from storage DS-eb5212a4-db54-4568-a228-7d25187ad9f8 node DatanodeRegistration(127.0.0.1:42421, datanodeUuid=72b57d5a-040e-4220-b1ff-281373022be5, infoPort=41753, infoSecurePort=0, ipcPort=36813, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:58:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe06754ee3c10f09a with lease ID 0x6eeedfbe0260b9bb: Processing first storage report for DS-b34c4dfa-56ee-4afb-b784-42463a1b4126 from datanode DatanodeRegistration(127.0.0.1:42421, datanodeUuid=72b57d5a-040e-4220-b1ff-281373022be5, infoPort=41753, infoSecurePort=0, ipcPort=36813, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402) 2024-11-14T09:58:19,489 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe06754ee3c10f09a with lease ID 0x6eeedfbe0260b9bb: from storage DS-b34c4dfa-56ee-4afb-b784-42463a1b4126 node DatanodeRegistration(127.0.0.1:42421, datanodeUuid=72b57d5a-040e-4220-b1ff-281373022be5, infoPort=41753, infoSecurePort=0, ipcPort=36813, storageInfo=lv=-57;cid=testClusterID;nsid=746582668;c=1731578297402), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-14T09:58:19,562 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec 2024-11-14T09:58:19,565 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/zookeeper_0, clientPort=54195, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-14T09:58:19,565 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54195 2024-11-14T09:58:19,566 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,567 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:58:19,575 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741825_1001 (size=7) 2024-11-14T09:58:19,575 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34 with version=8 2024-11-14T09:58:19,576 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:38163/user/jenkins/test-data/faad0e1c-78aa-a0d4-7207-2989dbbc3dea/hbase-staging 2024-11-14T09:58:19,578 INFO [Time-limited test {}] client.ConnectionUtils(128): master/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-14T09:58:19,578 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:58:19,579 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34993 2024-11-14T09:58:19,581 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34993 connecting to ZooKeeper ensemble=127.0.0.1:54195 2024-11-14T09:58:19,637 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:349930x0, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:58:19,637 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34993-0x10138c800570000 connected 2024-11-14T09:58:19,720 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,721 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,723 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:19,723 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34, hbase.cluster.distributed=false 2024-11-14T09:58:19,725 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:58:19,725 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34993 2024-11-14T09:58:19,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34993 2024-11-14T09:58:19,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34993 2024-11-14T09:58:19,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34993 2024-11-14T09:58:19,726 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34993 2024-11-14T09:58:19,741 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/defc576eb6b7:0 server-side Connection retries=45 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-14T09:58:19,742 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:36811 2024-11-14T09:58:19,743 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:36811 connecting to ZooKeeper ensemble=127.0.0.1:54195 2024-11-14T09:58:19,744 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,745 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,751 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:368110x0, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-14T09:58:19,752 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:19,752 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:36811-0x10138c800570001 connected 2024-11-14T09:58:19,752 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-14T09:58:19,753 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-14T09:58:19,753 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-14T09:58:19,754 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-14T09:58:19,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=36811 2024-11-14T09:58:19,754 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=36811 2024-11-14T09:58:19,755 DEBUG [Time-limited 
test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=36811 2024-11-14T09:58:19,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=36811 2024-11-14T09:58:19,755 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=36811 2024-11-14T09:58:19,769 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;defc576eb6b7:34993 2024-11-14T09:58:19,769 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/defc576eb6b7,34993,1731578299577 2024-11-14T09:58:19,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:58:19,781 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:58:19,782 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/defc576eb6b7,34993,1731578299577 2024-11-14T09:58:19,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-14T09:58:19,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:19,794 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:19,794 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-14T09:58:19,794 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/defc576eb6b7,34993,1731578299577 from backup master directory 2024-11-14T09:58:19,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/defc576eb6b7,34993,1731578299577 2024-11-14T09:58:19,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:58:19,804 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-14T09:58:19,804 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:58:19,804 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=defc576eb6b7,34993,1731578299577 2024-11-14T09:58:19,820 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/hbase.id] with ID: 2899bb71-01e1-49c7-8afb-95df7a2dc8c9 2024-11-14T09:58:19,820 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/.tmp/hbase.id 2024-11-14T09:58:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:58:19,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741826_1002 (size=42) 2024-11-14T09:58:19,826 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/.tmp/hbase.id]:[hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/hbase.id] 2024-11-14T09:58:19,836 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:19,836 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-14T09:58:19,837 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-14T09:58:19,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:19,846 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:19,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:58:19,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741827_1003 (size=196) 2024-11-14T09:58:19,852 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-14T09:58:19,853 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-14T09:58:19,853 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:58:19,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:58:19,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741828_1004 (size=1189) 2024-11-14T09:58:19,861 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store 2024-11-14T09:58:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:58:19,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741829_1005 (size=34) 2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:58:19,867 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:19,867 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
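The master:store descriptor printed above lists per-family attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE). That table is built internally by the master, so the following is only a hedged illustration of how the same attributes map onto the public builder API, using an invented user table name ("demo_storelike") and only the 'info' and 'proc' families as examples.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class StoreLikeDescriptor {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes in the log: 3 versions, ROWCOL bloom filter,
    // ROW_INDEX_V1 block encoding, in-memory, 8 KB block size.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    // 'proc' (like 'rs' and 'state' above) keeps the defaults: 1 version, ROW bloom, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .build();

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo_storelike"))  // hypothetical table name
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}
```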
2024-11-14T09:58:19,867 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578299867Disabling compacts and flushes for region at 1731578299867Disabling writes for close at 1731578299867Writing region close event to WAL at 1731578299867Closed at 1731578299867 2024-11-14T09:58:19,868 WARN [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/.initializing 2024-11-14T09:58:19,868 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/WALs/defc576eb6b7,34993,1731578299577 2024-11-14T09:58:19,871 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C34993%2C1731578299577, suffix=, logDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/WALs/defc576eb6b7,34993,1731578299577, archiveDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/oldWALs, maxLogs=10 2024-11-14T09:58:19,871 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C34993%2C1731578299577.1731578299871 2024-11-14T09:58:19,876 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/WALs/defc576eb6b7,34993,1731578299577/defc576eb6b7%2C34993%2C1731578299577.1731578299871 2024-11-14T09:58:19,877 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41753:41753),(127.0.0.1/127.0.0.1:42429:42429)] 2024-11-14T09:58:19,877 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-14T09:58:19,878 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:58:19,878 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,878 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,879 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,881 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-14T09:58:19,881 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:19,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:19,882 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,883 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-14T09:58:19,883 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:19,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:58:19,884 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,885 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-14T09:58:19,885 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:19,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:58:19,886 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,887 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-14T09:58:19,887 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:19,888 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-14T09:58:19,888 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,889 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,890 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,891 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,891 DEBUG [master/defc576eb6b7:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,892 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-14T09:58:19,893 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-14T09:58:19,895 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:58:19,896 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=695764, jitterRate=-0.11529086530208588}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-14T09:58:19,897 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1731578299878Initializing all the Stores at 1731578299879 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578299879Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578299879Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578299879Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578299879Cleaning up temporary data from old regions at 1731578299891 (+12 ms)Region opened successfully at 1731578299897 (+6 ms) 2024-11-14T09:58:19,897 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-14T09:58:19,900 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d5e77af, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:58:19,901 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-14T09:58:19,901 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-14T09:58:19,901 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-14T09:58:19,901 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-14T09:58:19,902 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-14T09:58:19,902 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-14T09:58:19,902 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-14T09:58:19,904 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-14T09:58:19,905 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-14T09:58:19,969 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-14T09:58:19,969 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-14T09:58:19,970 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-14T09:58:20,036 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-14T09:58:20,037 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-14T09:58:20,038 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-14T09:58:20,057 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-14T09:58:20,063 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-14T09:58:20,067 DEBUG 
[master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-14T09:58:20,069 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-14T09:58:20,078 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-14T09:58:20,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:20,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:20,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,088 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,089 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=defc576eb6b7,34993,1731578299577, sessionid=0x10138c800570000, setting cluster-up flag (Was=false) 2024-11-14T09:58:20,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,109 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,141 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-14T09:58:20,142 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,34993,1731578299577 2024-11-14T09:58:20,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,194 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-14T09:58:20,195 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=defc576eb6b7,34993,1731578299577 2024-11-14T09:58:20,196 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-14T09:58:20,197 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-14T09:58:20,197 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-14T09:58:20,197 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-14T09:58:20,198 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: defc576eb6b7,34993,1731578299577 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/defc576eb6b7:0, corePoolSize=5, maxPoolSize=5 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/defc576eb6b7:0, corePoolSize=10, maxPoolSize=10 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:58:20,199 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/defc576eb6b7:0, corePoolSize=1, 
maxPoolSize=1 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1731578330200 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-14T09:58:20,200 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,201 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:58:20,201 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-14T09:58:20,201 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-14T09:58:20,202 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578300201,5,FailOnTimeoutGroup] 2024-11-14T09:58:20,202 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578300202,5,FailOnTimeoutGroup] 2024-11-14T09:58:20,202 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,202 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-14T09:58:20,202 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,202 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,202 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,202 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-14T09:58:20,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:58:20,208 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741831_1007 (size=1321) 2024-11-14T09:58:20,208 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-14T09:58:20,208 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34 2024-11-14T09:58:20,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:58:20,214 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741832_1008 (size=32) 2024-11-14T09:58:20,215 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:58:20,216 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:58:20,217 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:58:20,217 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,218 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:58:20,218 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:58:20,219 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,219 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:58:20,220 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:58:20,220 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,220 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,221 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:58:20,222 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:58:20,222 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,222 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:58:20,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740 2024-11-14T09:58:20,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740 2024-11-14T09:58:20,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:58:20,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:58:20,224 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
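By now InitMetaProcedure has created the hbase:meta region (the well-known encoded name 1588230740) with its info/ns/rep_barrier/table families. A minimal sketch, assuming an hbase-site.xml on the classpath and a running cluster, of how a client could read that descriptor and region list back through the Admin API; MetaInspect is an invented class name.

```java
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.client.TableDescriptor;

public class MetaInspect {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // assumes hbase-site.xml on the classpath
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      TableDescriptor meta = admin.getDescriptor(TableName.META_TABLE_NAME);
      for (ColumnFamilyDescriptor cf : meta.getColumnFamilies()) {
        // Expect the same families the log shows: info, ns, rep_barrier, table.
        System.out.println("family " + cf.getNameAsString() + ", blocksize=" + cf.getBlocksize());
      }
      List<RegionInfo> regions = admin.getRegions(TableName.META_TABLE_NAME);
      for (RegionInfo r : regions) {
        // A single-meta cluster reports the fixed encoded name 1588230740.
        System.out.println("meta region encoded name = " + r.getEncodedName());
      }
    }
  }
}
```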
2024-11-14T09:58:20,225 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:58:20,227 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-14T09:58:20,228 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=801981, jitterRate=0.019772708415985107}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:58:20,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1731578300215Initializing all the Stores at 1731578300216 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300216Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300216Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578300216Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300216Cleaning up temporary data from old regions at 1731578300224 (+8 ms)Region opened successfully at 1731578300228 (+4 ms) 2024-11-14T09:58:20,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:58:20,228 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:58:20,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:58:20,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:58:20,228 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:58:20,229 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:58:20,229 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578300228Disabling compacts and flushes for region at 1731578300228Disabling writes for close at 1731578300228Writing region close 
event to WAL at 1731578300229 (+1 ms)Closed at 1731578300229 2024-11-14T09:58:20,230 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:58:20,230 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-14T09:58:20,230 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-14T09:58:20,231 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:58:20,232 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-14T09:58:20,257 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(746): ClusterId : 2899bb71-01e1-49c7-8afb-95df7a2dc8c9 2024-11-14T09:58:20,257 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-14T09:58:20,268 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-14T09:58:20,268 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-14T09:58:20,278 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-14T09:58:20,279 DEBUG [RS:0;defc576eb6b7:36811 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2badf0e7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=defc576eb6b7/172.17.0.2:0 2024-11-14T09:58:20,292 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;defc576eb6b7:36811 2024-11-14T09:58:20,292 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-14T09:58:20,292 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-14T09:58:20,292 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(832): About to register with Master. 
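Much of the chatter above is ZKWatcher reporting NodeCreated/NodeChildrenChanged events on znodes under /hbase (backup-masters, master, running, rs). For illustration only, and not how HBase's own ZKWatcher is implemented, the sketch below watches the same znodes with the plain ZooKeeper client; the connect string 127.0.0.1:54195 and the /hbase base znode are taken from this log and are specific to this test run.

```java
import java.util.List;
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

public class HBaseZnodeWatch {
  public static void main(String[] args) throws Exception {
    CountDownLatch connected = new CountDownLatch(1);
    // Quorum and base znode taken from the log lines above (127.0.0.1:54195, /hbase).
    ZooKeeper zk = new ZooKeeper("127.0.0.1:54195", 30_000, event -> {
      if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
        connected.countDown();
      }
      // Prints the same event types ZKWatcher logs: NodeCreated, NodeDeleted, NodeChildrenChanged.
      System.out.println("type=" + event.getType() + ", path=" + event.getPath());
    });
    connected.await();

    // Watches are one-shot, so re-registering after each event mirrors what ZKUtil does.
    List<String> children = zk.getChildren("/hbase", true);
    System.out.println("children of /hbase: " + children);
    zk.exists("/hbase/master", true);   // watch the active-master znode
    Thread.sleep(60_000);               // keep the session open long enough to observe events
    zk.close();
  }
}
```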
2024-11-14T09:58:20,293 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(2659): reportForDuty to master=defc576eb6b7,34993,1731578299577 with port=36811, startcode=1731578299741 2024-11-14T09:58:20,293 DEBUG [RS:0;defc576eb6b7:36811 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-14T09:58:20,295 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60283, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-14T09:58:20,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34993 {}] master.ServerManager(363): Checking decommissioned status of RegionServer defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,295 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34993 {}] master.ServerManager(517): Registering regionserver=defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,296 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34 2024-11-14T09:58:20,296 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45331 2024-11-14T09:58:20,296 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-14T09:58:20,308 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:58:20,308 DEBUG [RS:0;defc576eb6b7:36811 {}] zookeeper.ZKUtil(111): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,308 WARN [RS:0;defc576eb6b7:36811 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-14T09:58:20,308 INFO [RS:0;defc576eb6b7:36811 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:58:20,308 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,308 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [defc576eb6b7,36811,1731578299741] 2024-11-14T09:58:20,312 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-14T09:58:20,314 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-14T09:58:20,314 INFO [RS:0;defc576eb6b7:36811 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-14T09:58:20,314 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
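The sequence above (embedded master, one regionserver reporting for duty, WAL provider selection, chores being enabled) is the usual startup of an in-process HBase mini cluster inside a test. A sketch of how such a cluster is typically started, assuming the hbase-testing-util dependency; note the helper class is HBaseTestingUtility in HBase 2.x, while newer lines (such as the 4.0.0-alpha build in this log) rename it to HBaseTestingUtil, so the class name used here is an assumption tied to 2.x.

```java
import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.client.Admin;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    // Starts an in-process ZooKeeper, a mini HDFS, one master and one regionserver,
    // which is what produces the kind of startup log shown above.
    HBaseTestingUtility util = new HBaseTestingUtility();
    util.startMiniCluster(1);
    try {
      Admin admin = util.getAdmin();
      System.out.println("cluster id = " + admin.getClusterMetrics().getClusterId());
    } finally {
      util.shutdownMiniCluster();
    }
  }
}
```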
2024-11-14T09:58:20,315 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-14T09:58:20,315 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-14T09:58:20,315 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/defc576eb6b7:0, corePoolSize=2, maxPoolSize=2 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/defc576eb6b7:0, corePoolSize=1, maxPoolSize=1 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:58:20,316 DEBUG [RS:0;defc576eb6b7:36811 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/defc576eb6b7:0, corePoolSize=3, maxPoolSize=3 2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
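The "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries above come from HBase's internal ChoreService/ScheduledChore classes. A self-contained sketch of how such a periodic chore is scheduled, assuming those internal classes are available on the classpath; the 1000 ms period mirrors the CompactionChecker entry above, everything else (names, sleep, printout) is illustrative.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Minimal Stoppable so the chore can be cancelled.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    ChoreService service = new ChoreService("demo");
    // Runs every 1000 ms, like the CompactionChecker chore above.
    ScheduledChore checker = new ScheduledChore("DemoChecker", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("periodic check");
      }
    };
    service.scheduleChore(checker);
    Thread.sleep(3000);
    stopper.stop("done");
    service.shutdown();
  }
}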
2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,317 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36811,1731578299741-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:58:20,333 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-14T09:58:20,333 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,36811,1731578299741-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,333 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,333 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.Replication(171): defc576eb6b7,36811,1731578299741 started 2024-11-14T09:58:20,346 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,346 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1482): Serving as defc576eb6b7,36811,1731578299741, RpcServer on defc576eb6b7/172.17.0.2:36811, sessionid=0x10138c800570001 2024-11-14T09:58:20,346 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-14T09:58:20,346 DEBUG [RS:0;defc576eb6b7:36811 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,346 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,36811,1731578299741' 2024-11-14T09:58:20,346 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'defc576eb6b7,36811,1731578299741' 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-14T09:58:20,347 DEBUG 
[RS:0;defc576eb6b7:36811 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-14T09:58:20,347 DEBUG [RS:0;defc576eb6b7:36811 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-14T09:58:20,347 INFO [RS:0;defc576eb6b7:36811 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-14T09:58:20,347 INFO [RS:0;defc576eb6b7:36811 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-14T09:58:20,382 WARN [defc576eb6b7:34993 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-14T09:58:20,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:20,446 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:20,450 INFO [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C36811%2C1731578299741, suffix=, logDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/defc576eb6b7,36811,1731578299741, archiveDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs, maxLogs=32 2024-11-14T09:58:20,451 INFO [RS:0;defc576eb6b7:36811 {}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C36811%2C1731578299741.1731578300450 2024-11-14T09:58:20,458 INFO [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/defc576eb6b7,36811,1731578299741/defc576eb6b7%2C36811%2C1731578299741.1731578300450 2024-11-14T09:58:20,459 DEBUG [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41753:41753),(127.0.0.1/127.0.0.1:42429:42429)] 2024-11-14T09:58:20,633 DEBUG [defc576eb6b7:34993 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-14T09:58:20,633 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,635 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,36811,1731578299741, state=OPENING 2024-11-14T09:58:20,665 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-14T09:58:20,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): 
master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,678 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:20,678 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-14T09:58:20,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:58:20,678 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:58:20,679 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,36811,1731578299741}] 2024-11-14T09:58:20,831 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-14T09:58:20,834 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51031, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-14T09:58:20,838 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-14T09:58:20,838 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:58:20,840 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=defc576eb6b7%2C36811%2C1731578299741.meta, suffix=.meta, logDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/defc576eb6b7,36811,1731578299741, archiveDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs, maxLogs=32 2024-11-14T09:58:20,841 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor defc576eb6b7%2C36811%2C1731578299741.meta.1731578300841.meta 2024-11-14T09:58:20,849 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/defc576eb6b7,36811,1731578299741/defc576eb6b7%2C36811%2C1731578299741.meta.1731578300841.meta 2024-11-14T09:58:20,860 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42429:42429),(127.0.0.1/127.0.0.1:41753:41753)] 2024-11-14T09:58:20,868 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 
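The two "WAL configuration: blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" entries above reflect standard WAL sizing settings; rollsize is blocksize times the roll multiplier. A small sketch of the corresponding keys, assuming the usual property names (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalSizingSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    // blocksize=256 MB; rollsize = blocksize * multiplier (0.5 -> 128 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    // maxLogs=32: WAL files allowed to accumulate before flushes are forced.
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}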
2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-14T09:58:20,869 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-14T09:58:20,869 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-14T09:58:20,870 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-14T09:58:20,871 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-14T09:58:20,871 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,872 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-14T09:58:20,872 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-14T09:58:20,872 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,873 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-14T09:58:20,873 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-14T09:58:20,873 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,874 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-14T09:58:20,874 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window 
org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-14T09:58:20,874 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-14T09:58:20,875 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-14T09:58:20,875 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-14T09:58:20,875 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740 2024-11-14T09:58:20,876 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740 2024-11-14T09:58:20,877 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-14T09:58:20,877 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-14T09:58:20,878 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
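The repeated CompactionConfiguration lines above (minCompactSize 128 MB, minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2) correspond to the usual store-compaction tuning keys. A sketch, assuming the stock hbase.hstore.compaction.* property names (class name illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionTuningSketch {
  public static Configuration build() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);     // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);    // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
    conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize
    return conf;
  }
}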
2024-11-14T09:58:20,879 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-14T09:58:20,879 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=805612, jitterRate=0.02438873052597046}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-14T09:58:20,879 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-14T09:58:20,880 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1731578300869Writing region info on filesystem at 1731578300869Initializing all the Stores at 1731578300870 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300870Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300870Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1731578300870Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1731578300870Cleaning up temporary data from old regions at 1731578300877 (+7 ms)Running coprocessor post-open hooks at 1731578300880 (+3 ms)Region opened successfully at 1731578300880 2024-11-14T09:58:20,881 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1731578300831 2024-11-14T09:58:20,883 DEBUG [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-14T09:58:20,883 INFO [RS_OPEN_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-14T09:58:20,883 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,884 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as defc576eb6b7,36811,1731578299741, state=OPEN 2024-11-14T09:58:20,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:58:20,964 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-14T09:58:20,964 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=defc576eb6b7,36811,1731578299741 2024-11-14T09:58:20,964 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:58:20,964 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-14T09:58:20,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-14T09:58:20,968 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=defc576eb6b7,36811,1731578299741 in 286 msec 2024-11-14T09:58:20,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-14T09:58:20,972 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 738 msec 2024-11-14T09:58:20,973 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-14T09:58:20,973 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-14T09:58:20,974 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:58:20,974 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,36811,1731578299741, seqNum=-1] 2024-11-14T09:58:20,974 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:58:20,975 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41479, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:58:20,979 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 782 msec 2024-11-14T09:58:20,980 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1731578300980, completionTime=-1 2024-11-14T09:58:20,980 INFO 
[master/defc576eb6b7:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-14T09:58:20,980 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1731578360982 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1731578420982 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-defc576eb6b7:34993, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,982 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:20,984 DEBUG [master/defc576eb6b7:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-14T09:58:20,986 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.182sec 2024-11-14T09:58:20,986 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-14T09:58:20,986 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-14T09:58:20,986 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-14T09:58:20,986 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
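Once the master reports "Master has completed initialization" above, the client entries that follow fetch the cluster id and the hbase:meta location from the connection registry. A sketch of an equivalent lookup through the public client API, assuming the ZooKeeper quorum shown in the log (127.0.0.1:54195) and that host:port quorum entries are accepted by the client configuration:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionLocation;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class MetaLocationSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1:54195"); // quorum taken from the log above
    try (Connection conn = ConnectionFactory.createConnection(conf);
         RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      HRegionLocation loc = locator.getRegionLocation(HConstants.EMPTY_START_ROW);
      // Expected to resolve to the region server opened above, e.g. defc576eb6b7,36811,...
      System.out.println("hbase:meta is on " + loc.getServerName());
    }
  }
}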
2024-11-14T09:58:20,987 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-14T09:58:20,987 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-14T09:58:20,987 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-14T09:58:20,989 DEBUG [master/defc576eb6b7:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-14T09:58:20,989 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-14T09:58:20,989 INFO [master/defc576eb6b7:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=defc576eb6b7,34993,1731578299577-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-14T09:58:21,058 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1bdd849a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:58:21,058 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request defc576eb6b7,34993,-1 for getting cluster id 2024-11-14T09:58:21,058 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-14T09:58:21,059 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2899bb71-01e1-49c7-8afb-95df7a2dc8c9' 2024-11-14T09:58:21,059 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-14T09:58:21,059 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2899bb71-01e1-49c7-8afb-95df7a2dc8c9" 2024-11-14T09:58:21,060 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d106200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:58:21,060 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [defc576eb6b7,34993,-1] 2024-11-14T09:58:21,060 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-14T09:58:21,060 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,061 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49204, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-14T09:58:21,062 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7031f48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-14T09:58:21,062 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-14T09:58:21,063 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=defc576eb6b7,36811,1731578299741, seqNum=-1] 2024-11-14T09:58:21,064 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-14T09:58:21,065 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55972, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-14T09:58:21,067 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=defc576eb6b7,34993,1731578299577 2024-11-14T09:58:21,067 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-14T09:58:21,069 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-14T09:58:21,069 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-14T09:58:21,071 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs, maxLogs=32 2024-11-14T09:58:21,072 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731578301072 2024-11-14T09:58:21,077 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1/test.com%2C8080%2C1.1731578301072 2024-11-14T09:58:21,082 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42429:42429),(127.0.0.1/127.0.0.1:41753:41753)] 2024-11-14T09:58:21,083 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1731578301083 2024-11-14T09:58:21,088 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,089 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,089 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,089 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,089 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,089 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1/test.com%2C8080%2C1.1731578301072 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1/test.com%2C8080%2C1.1731578301083 2024-11-14T09:58:21,090 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42429:42429),(127.0.0.1/127.0.0.1:41753:41753)] 2024-11-14T09:58:21,090 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1/test.com%2C8080%2C1.1731578301072 is not closed yet, will try archiving it next time 2024-11-14T09:58:21,090 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,090 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,091 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,091 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,091 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741835_1011 (size=93) 2024-11-14T09:58:21,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741835_1011 (size=93) 2024-11-14T09:58:21,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741836_1012 (size=93) 2024-11-14T09:58:21,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741836_1012 (size=93) 2024-11-14T09:58:21,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,35821,1731578107553/defc576eb6b7%2C35821%2C1731578107553.1731578107825 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:21,447 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:37757/user/jenkins/test-data/99a23f12-ef45-9e14-f467-dfb022247527/WALs/defc576eb6b7,37839,1731578105754/defc576eb6b7%2C37839%2C1731578105754.meta.1731578107391.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-14T09:58:21,492 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/WALs/test.com,8080,1/test.com%2C8080%2C1.1731578301072 to hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs/test.com%2C8080%2C1.1731578301072 2024-11-14T09:58:21,495 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs 2024-11-14T09:58:21,495 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1731578301083) 2024-11-14T09:58:21,495 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-14T09:58:21,496 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
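The archive step above ("Archiving ... to .../oldWALs") follows a WAL roll that the test drove directly through the internal FSHLog classes. From a client, a comparable roll can be requested with the public Admin API; a sketch (connection settings omitted, the server set is taken from cluster metrics):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      // Ask each live region server to roll its WAL; rolled files are later
      // moved to the oldWALs directory, as in the archive entry above.
      for (ServerName sn : admin.getClusterMetrics().getLiveServerMetrics().keySet()) {
        admin.rollWALWriter(sn);
      }
    }
  }
}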
2024-11-14T09:58:21,496 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:21,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,496 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,496 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-14T09:58:21,496 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-14T09:58:21,496 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2011798360, stopped=false 2024-11-14T09:58:21,496 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=defc576eb6b7,34993,1731578299577 2024-11-14T09:58:21,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:21,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-14T09:58:21,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:21,550 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:21,550 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:58:21,550 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-14T09:58:21,550 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:21,550 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,550 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'defc576eb6b7,36811,1731578299741' ***** 2024-11-14T09:58:21,551 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-14T09:58:21,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:21,551 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-14T09:58:21,551 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(959): stopping server defc576eb6b7,36811,1731578299741 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:58:21,551 INFO [RS:0;defc576eb6b7:36811 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;defc576eb6b7:36811. 
2024-11-14T09:58:21,551 DEBUG [RS:0;defc576eb6b7:36811 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-14T09:58:21,552 DEBUG [RS:0;defc576eb6b7:36811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,552 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-14T09:58:21,552 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-14T09:58:21,552 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-14T09:58:21,552 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-14T09:58:21,556 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-14T09:58:21,556 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-14T09:58:21,556 DEBUG [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-14T09:58:21,556 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-14T09:58:21,556 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-14T09:58:21,556 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-14T09:58:21,556 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-14T09:58:21,556 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-14T09:58:21,557 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-14T09:58:21,574 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/.tmp/ns/9e254de801fc4ec29cdb2be9a859257f is 43, key is default/ns:d/1731578300976/Put/seqid=0 2024-11-14T09:58:21,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741837_1013 (size=5153) 2024-11-14T09:58:21,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741837_1013 (size=5153) 2024-11-14T09:58:21,578 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/.tmp/ns/9e254de801fc4ec29cdb2be9a859257f 2024-11-14T09:58:21,583 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/.tmp/ns/9e254de801fc4ec29cdb2be9a859257f as hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/ns/9e254de801fc4ec29cdb2be9a859257f 2024-11-14T09:58:21,588 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/ns/9e254de801fc4ec29cdb2be9a859257f, entries=2, sequenceid=6, filesize=5.0 K 2024-11-14T09:58:21,588 INFO 
[RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false 2024-11-14T09:58:21,589 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-14T09:58:21,593 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-14T09:58:21,593 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-14T09:58:21,593 INFO [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-14T09:58:21,593 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1731578301556Running coprocessor pre-close hooks at 1731578301556Disabling compacts and flushes for region at 1731578301556Disabling writes for close at 1731578301556Obtaining lock to block concurrent updates at 1731578301557 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1731578301557Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1731578301557Flushing stores of hbase:meta,,1.1588230740 at 1731578301558 (+1 ms)Flushing 1588230740/ns: creating writer at 1731578301558Flushing 1588230740/ns: appending metadata at 1731578301573 (+15 ms)Flushing 1588230740/ns: closing flushed file at 1731578301573Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14606e7a: reopening flushed file at 1731578301582 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 32ms, sequenceid=6, compaction requested=false at 1731578301588 (+6 ms)Writing region close event to WAL at 1731578301590 (+2 ms)Running coprocessor post-close hooks at 1731578301593 (+3 ms)Closed at 1731578301593 2024-11-14T09:58:21,594 DEBUG [RS_CLOSE_META-regionserver/defc576eb6b7:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-14T09:58:21,756 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(976): stopping server defc576eb6b7,36811,1731578299741; all regions closed. 
2024-11-14T09:58:21,757 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,757 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,757 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,758 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,758 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:58:21,760 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741834_1010 (size=1152) 2024-11-14T09:58:21,763 DEBUG [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs 2024-11-14T09:58:21,763 INFO [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C36811%2C1731578299741.meta:.meta(num 1731578300841) 2024-11-14T09:58:21,764 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,764 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,764 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,764 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,764 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,766 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:58:21,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741833_1009 (size=93) 2024-11-14T09:58:21,769 DEBUG [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/oldWALs 2024-11-14T09:58:21,769 INFO [RS:0;defc576eb6b7:36811 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog defc576eb6b7%2C36811%2C1731578299741:(num 1731578300450) 2024-11-14T09:58:21,769 DEBUG [RS:0;defc576eb6b7:36811 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-14T09:58:21,769 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.LeaseManager(133): Closed leases 2024-11-14T09:58:21,769 INFO [RS:0;defc576eb6b7:36811 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:58:21,770 INFO [RS:0;defc576eb6b7:36811 {}] hbase.ChoreService(370): Chore service for: regionserver/defc576eb6b7:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-14T09:58:21,770 INFO [RS:0;defc576eb6b7:36811 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:58:21,770 INFO [regionserver/defc576eb6b7:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:58:21,770 INFO [RS:0;defc576eb6b7:36811 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:36811 2024-11-14T09:58:21,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/defc576eb6b7,36811,1731578299741 2024-11-14T09:58:21,783 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-14T09:58:21,783 INFO [RS:0;defc576eb6b7:36811 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:58:21,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,792 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,793 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,794 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [defc576eb6b7,36811,1731578299741] 2024-11-14T09:58:21,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,795 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,804 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/defc576eb6b7,36811,1731578299741 already deleted, retry=false 2024-11-14T09:58:21,804 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; defc576eb6b7,36811,1731578299741 expired; onlineServers=0 2024-11-14T09:58:21,804 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'defc576eb6b7,34993,1731578299577' ***** 2024-11-14T09:58:21,804 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-14T09:58:21,804 INFO [M:0;defc576eb6b7:34993 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-14T09:58:21,804 INFO [M:0;defc576eb6b7:34993 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-14T09:58:21,804 DEBUG [M:0;defc576eb6b7:34993 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-14T09:58:21,804 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-14T09:58:21,804 DEBUG [M:0;defc576eb6b7:34993 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-14T09:58:21,804 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578300201 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.large.0-1731578300201,5,FailOnTimeoutGroup] 2024-11-14T09:58:21,804 DEBUG [master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578300202 {}] cleaner.HFileCleaner(306): Exit Thread[master/defc576eb6b7:0:becomeActiveMaster-HFileCleaner.small.0-1731578300202,5,FailOnTimeoutGroup] 2024-11-14T09:58:21,805 INFO [M:0;defc576eb6b7:34993 {}] hbase.ChoreService(370): Chore service for: master/defc576eb6b7:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-14T09:58:21,805 INFO [M:0;defc576eb6b7:34993 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-14T09:58:21,805 DEBUG [M:0;defc576eb6b7:34993 {}] master.HMaster(1795): Stopping service threads 2024-11-14T09:58:21,805 INFO [M:0;defc576eb6b7:34993 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-14T09:58:21,805 INFO [M:0;defc576eb6b7:34993 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-14T09:58:21,805 INFO [M:0;defc576eb6b7:34993 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-14T09:58:21,805 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-14T09:58:21,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-14T09:58:21,814 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-14T09:58:21,815 DEBUG [M:0;defc576eb6b7:34993 {}] zookeeper.ZKUtil(347): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-14T09:58:21,815 WARN [M:0;defc576eb6b7:34993 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-14T09:58:21,815 INFO [M:0;defc576eb6b7:34993 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/.lastflushedseqids 2024-11-14T09:58:21,819 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741838_1014 (size=99) 2024-11-14T09:58:21,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741838_1014 (size=99) 2024-11-14T09:58:21,820 INFO [M:0;defc576eb6b7:34993 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-14T09:58:21,820 INFO [M:0;defc576eb6b7:34993 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-14T09:58:21,820 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-14T09:58:21,820 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:21,820 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:21,820 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-14T09:58:21,820 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:21,821 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-14T09:58:21,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,827 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,828 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,833 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,834 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,835 DEBUG [M:0;defc576eb6b7:34993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc76aa1b5c984487a64fc1a7411526d8 is 82, key is hbase:meta,,1/info:regioninfo/1731578300883/Put/seqid=0 2024-11-14T09:58:21,836 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-14T09:58:21,839 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741839_1015 (size=5672) 2024-11-14T09:58:21,840 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741839_1015 (size=5672) 2024-11-14T09:58:21,840 INFO [M:0;defc576eb6b7:34993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc76aa1b5c984487a64fc1a7411526d8 2024-11-14T09:58:21,859 DEBUG [M:0;defc576eb6b7:34993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65d9660f06144e459aa84c1e72704304 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1731578300979/Put/seqid=0 2024-11-14T09:58:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741840_1016 (size=5275) 2024-11-14T09:58:21,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741840_1016 (size=5275) 2024-11-14T09:58:21,864 INFO [M:0;defc576eb6b7:34993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65d9660f06144e459aa84c1e72704304 2024-11-14T09:58:21,883 DEBUG [M:0;defc576eb6b7:34993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b5393f6c36b04a0b911c2fbdc4fa9c26 is 69, key is defc576eb6b7,36811,1731578299741/rs:state/1731578300295/Put/seqid=0 2024-11-14T09:58:21,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741841_1017 (size=5156) 2024-11-14T09:58:21,888 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741841_1017 (size=5156) 2024-11-14T09:58:21,889 INFO [M:0;defc576eb6b7:34993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b5393f6c36b04a0b911c2fbdc4fa9c26 2024-11-14T09:58:21,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:21,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:36811-0x10138c800570001, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:21,894 INFO [RS:0;defc576eb6b7:36811 {}] hbase.HBaseServerBase(486): Close table descriptors 
2024-11-14T09:58:21,894 INFO [RS:0;defc576eb6b7:36811 {}] regionserver.HRegionServer(1031): Exiting; stopping=defc576eb6b7,36811,1731578299741; zookeeper connection closed. 2024-11-14T09:58:21,894 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@76cfbd44 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@76cfbd44 2024-11-14T09:58:21,894 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-14T09:58:21,907 DEBUG [M:0;defc576eb6b7:34993 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39fa225e208f40dcbf1c9dc7780cdd38 is 52, key is load_balancer_on/state:d/1731578301068/Put/seqid=0 2024-11-14T09:58:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741842_1018 (size=5056) 2024-11-14T09:58:21,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741842_1018 (size=5056) 2024-11-14T09:58:21,911 INFO [M:0;defc576eb6b7:34993 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39fa225e208f40dcbf1c9dc7780cdd38 2024-11-14T09:58:21,916 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fc76aa1b5c984487a64fc1a7411526d8 as hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc76aa1b5c984487a64fc1a7411526d8 2024-11-14T09:58:21,921 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fc76aa1b5c984487a64fc1a7411526d8, entries=8, sequenceid=29, filesize=5.5 K 2024-11-14T09:58:21,922 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/65d9660f06144e459aa84c1e72704304 as hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/65d9660f06144e459aa84c1e72704304 2024-11-14T09:58:21,926 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/65d9660f06144e459aa84c1e72704304, entries=3, sequenceid=29, filesize=5.2 K 2024-11-14T09:58:21,926 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/b5393f6c36b04a0b911c2fbdc4fa9c26 as 
hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b5393f6c36b04a0b911c2fbdc4fa9c26 2024-11-14T09:58:21,931 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/b5393f6c36b04a0b911c2fbdc4fa9c26, entries=1, sequenceid=29, filesize=5.0 K 2024-11-14T09:58:21,932 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/39fa225e208f40dcbf1c9dc7780cdd38 as hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39fa225e208f40dcbf1c9dc7780cdd38 2024-11-14T09:58:21,936 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45331/user/jenkins/test-data/0ec0dfc7-cd06-b0ac-570b-f565cfe49a34/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/39fa225e208f40dcbf1c9dc7780cdd38, entries=1, sequenceid=29, filesize=4.9 K 2024-11-14T09:58:21,937 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=29, compaction requested=false 2024-11-14T09:58:21,939 INFO [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-14T09:58:21,939 DEBUG [M:0;defc576eb6b7:34993 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1731578301820Disabling compacts and flushes for region at 1731578301820Disabling writes for close at 1731578301820Obtaining lock to block concurrent updates at 1731578301821 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1731578301821Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1731578301821Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1731578301821Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1731578301821Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1731578301835 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1731578301835Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1731578301844 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1731578301859 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1731578301859Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1731578301868 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1731578301883 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1731578301883Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1731578301893 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1731578301906 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1731578301906Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1eace41d: reopening flushed file at 1731578301916 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61a891cd: reopening flushed file at 1731578301921 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a8afee0: reopening flushed file at 1731578301926 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7a26d4a9: reopening flushed file at 1731578301931 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 117ms, sequenceid=29, compaction requested=false at 1731578301937 (+6 ms)Writing region close event to WAL at 1731578301939 (+2 ms)Closed at 1731578301939 2024-11-14T09:58:21,939 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,939 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,939 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,939 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,939 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-14T09:58:21,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34635 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:58:21,941 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42421 is added to blk_1073741830_1006 (size=10311) 2024-11-14T09:58:21,942 INFO [M:0;defc576eb6b7:34993 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-14T09:58:21,942 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-14T09:58:21,942 INFO [M:0;defc576eb6b7:34993 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34993 2024-11-14T09:58:21,942 INFO [M:0;defc576eb6b7:34993 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-14T09:58:22,051 INFO [M:0;defc576eb6b7:34993 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-14T09:58:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:22,052 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34993-0x10138c800570000, quorum=127.0.0.1:54195, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-14T09:58:22,054 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@420a76f4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:22,055 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5f6d8bd9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:22,055 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:22,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@78e9eb81{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:22,055 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20f48718{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:22,056 WARN [BP-1345401396-172.17.0.2-1731578297402 heartbeating to localhost/127.0.0.1:45331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:58:22,056 WARN [BP-1345401396-172.17.0.2-1731578297402 heartbeating to localhost/127.0.0.1:45331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1345401396-172.17.0.2-1731578297402 (Datanode Uuid 72b57d5a-040e-4220-b1ff-281373022be5) service to localhost/127.0.0.1:45331 2024-11-14T09:58:22,056 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:58:22,056 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:58:22,057 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data3/current/BP-1345401396-172.17.0.2-1731578297402 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:22,057 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data4/current/BP-1345401396-172.17.0.2-1731578297402 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:22,057 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:58:22,059 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@54697ec6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-14T09:58:22,059 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4b4f311b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:22,060 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:22,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1db48135{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:22,060 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5e513ee6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:22,061 WARN [BP-1345401396-172.17.0.2-1731578297402 heartbeating to localhost/127.0.0.1:45331 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-14T09:58:22,061 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-14T09:58:22,061 WARN [BP-1345401396-172.17.0.2-1731578297402 heartbeating to localhost/127.0.0.1:45331 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1345401396-172.17.0.2-1731578297402 (Datanode Uuid 45ea8ec0-a953-474c-9765-fd60a101b1ae) service to localhost/127.0.0.1:45331 2024-11-14T09:58:22,061 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-14T09:58:22,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data2/current/BP-1345401396-172.17.0.2-1731578297402 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:22,062 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/cluster_9eb6a2b3-305c-a050-cf5d-def2f5e86fd9/data/data1/current/BP-1345401396-172.17.0.2-1731578297402 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-14T09:58:22,062 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-14T09:58:22,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2b790cdd{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-14T09:58:22,067 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1bd8a44a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-14T09:58:22,067 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-14T09:58:22,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@233bbfea{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-14T09:58:22,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@426614f2{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/6ca4557b-19ef-9f97-6a06-5a66ffce0aec/hadoop.log.dir/,STOPPED} 2024-11-14T09:58:22,074 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-14T09:58:22,088 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-14T09:58:22,097 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=267 (was 229) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45331 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45331 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45331
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-3
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45331 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: nioEventLoopGroup-42-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-44-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: LeaseRenewer:jenkins@localhost:45331
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77)
    app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45331 from jenkins
    java.base@17.0.11/java.lang.Object.wait(Native Method)
    app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042)
    app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093)

Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-1
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-45-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: HMaster-EventLoopGroup-16-1
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308)
    app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997)
    app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-43-3
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45331
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45331
    java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method)
    java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704)
    java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903)
    app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

Potentially hanging thread: nioEventLoopGroup-42-2
    java.base@17.0.11/java.lang.Thread.sleep(Native Method)
    app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787)
    app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596)
    app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994)
    app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
    app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
    java.base@17.0.11/java.lang.Thread.run(Thread.java:840)

 - Thread LEAK? -, OpenFileDescriptor=538 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=228 (was 230), ProcessCount=11 (was 11), AvailableMemoryMB=3457 (was 3471)
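Note: the report above is produced by a before/after resource diff around the test. The sketch below is a minimal illustration of that idea, assuming a JUnit-style before/after hook; the class and method names (ThreadLeakReport, before, after) are made up for illustration and this is not the code HBase's ResourceChecker actually uses. It snapshots the live threads before the test, diffs against the live threads afterwards, prints a stack dump for every thread that appeared in between, and ends with a rough analogue of the closing summary line (thread and file-descriptor counts, system load).

import java.lang.management.ManagementFactory;
import java.lang.management.OperatingSystemMXBean;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

/**
 * Minimal sketch (not HBase's actual ResourceChecker) of a before/after
 * thread-leak report like the one printed above.
 */
public class ThreadLeakReport {

    private final Set<Long> threadIdsBefore = new HashSet<>();

    /** Call before the test: remember which threads already exist. */
    public void before() {
        for (Thread t : Thread.getAllStackTraces().keySet()) {
            threadIdsBefore.add(t.getId());
        }
    }

    /** Call after the test: report threads that were not present before. */
    public void after() {
        Map<Thread, StackTraceElement[]> stacks = Thread.getAllStackTraces();
        for (Map.Entry<Thread, StackTraceElement[]> e : stacks.entrySet()) {
            Thread t = e.getKey();
            if (!threadIdsBefore.contains(t.getId())) {
                System.out.println("Potentially hanging thread: " + t.getName());
                for (StackTraceElement frame : e.getValue()) {
                    System.out.println("    " + frame);
                }
            }
        }
        // Rough analogue of the summary line. The file-descriptor counters are
        // platform-specific and only available on Unix-like JVMs.
        OperatingSystemMXBean os = ManagementFactory.getOperatingSystemMXBean();
        long openFds = -1;
        long maxFds = -1;
        if (os instanceof com.sun.management.UnixOperatingSystemMXBean unixOs) {
            openFds = unixOs.getOpenFileDescriptorCount();
            maxFds = unixOs.getMaxFileDescriptorCount();
        }
        System.out.printf(
            "Thread=%d (was %d), OpenFileDescriptor=%d, MaxFileDescriptor=%d, SystemLoadAverage=%.0f%n",
            stacks.size(), threadIdsBefore.size(), openFds, maxFds,
            os.getSystemLoadAverage());
    }
}

Typical usage would be to call before() in a test's setup hook and after() in its teardown hook; a thread such as an un-shutdown netty event loop group or a lingering IPC client connection then shows up in the "Potentially hanging thread" list exactly as in the report above.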