2024-11-19 12:45:07,181 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 12:45:07,199 main DEBUG Took 0.015680 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-19 12:45:07,200 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-19 12:45:07,200 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-19 12:45:07,202 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-19 12:45:07,203 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,212 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-19 12:45:07,229 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,231 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,231 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,232 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,232 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,233 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,234 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,234 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,235 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,236 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,237 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,237 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,238 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,238 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-19 12:45:07,239 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,239 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,240 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,240 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,241 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,241 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,242 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,242 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,243 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,243 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-19 12:45:07,244 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,244 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-19 12:45:07,246 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-19 12:45:07,248 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-19 12:45:07,250 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-19 12:45:07,251 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-19 12:45:07,253 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-19 12:45:07,253 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-19 12:45:07,264 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-19 12:45:07,267 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-19 12:45:07,270 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-19 12:45:07,270 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-19 12:45:07,271 main DEBUG createAppenders(={Console}) 2024-11-19 12:45:07,272 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-19 12:45:07,272 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-19 12:45:07,273 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-19 12:45:07,273 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-19 12:45:07,274 main DEBUG OutputStream closed 2024-11-19 12:45:07,274 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-19 12:45:07,275 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-19 12:45:07,275 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-19 12:45:07,367 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-19 12:45:07,370 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-19 12:45:07,371 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-19 12:45:07,373 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-19 12:45:07,374 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-19 12:45:07,375 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-19 12:45:07,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-19 12:45:07,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-19 12:45:07,376 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-19 12:45:07,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-19 12:45:07,377 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-19 12:45:07,378 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-19 12:45:07,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-19 12:45:07,378 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-19 12:45:07,379 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-19 12:45:07,379 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-19 12:45:07,379 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-19 12:45:07,380 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-19 12:45:07,382 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-19 12:45:07,383 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-19 12:45:07,384 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-19 12:45:07,385 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-19T12:45:07,664 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e 2024-11-19 12:45:07,666 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-19 12:45:07,667 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
2024-11-19T12:45:07,676 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-19T12:45:07,715 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=348, ProcessCount=11, AvailableMemoryMB=7176 2024-11-19T12:45:07,718 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:45:07,736 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6, deleteOnExit=true 2024-11-19T12:45:07,737 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:45:07,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/test.cache.data in system properties and HBase conf 2024-11-19T12:45:07,739 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:45:07,740 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:45:07,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:45:07,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:45:07,741 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:45:07,840 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-19T12:45:07,953 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T12:45:07,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:45:07,957 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:45:07,958 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:45:07,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:45:07,959 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:45:07,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:45:07,960 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:45:07,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:45:07,961 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:45:07,962 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:45:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:45:07,963 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:45:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:45:07,964 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:45:08,492 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:45:09,022 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T12:45:09,107 INFO [Time-limited test {}] log.Log(170): Logging initialized @2804ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-19T12:45:09,191 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:09,259 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:09,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:09,292 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:09,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:45:09,308 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:09,311 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:09,313 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:09,534 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4c77270f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/java.io.tmpdir/jetty-localhost-37903-hadoop-hdfs-3_4_1-tests_jar-_-any-17743391495006991363/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:45:09,544 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:37903} 2024-11-19T12:45:09,544 INFO [Time-limited test {}] server.Server(415): Started @3242ms 2024-11-19T12:45:09,575 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:45:10,228 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:10,240 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:10,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:10,243 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:10,243 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:45:10,244 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:10,245 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:10,375 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@59e63bea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/java.io.tmpdir/jetty-localhost-43037-hadoop-hdfs-3_4_1-tests_jar-_-any-15191802552349513191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:10,376 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:43037} 2024-11-19T12:45:10,376 INFO [Time-limited test {}] server.Server(415): Started @4074ms 2024-11-19T12:45:10,436 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:45:10,606 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:45:10,615 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:45:10,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:45:10,619 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:45:10,620 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:45:10,623 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:45:10,624 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:45:10,769 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55d18735{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/java.io.tmpdir/jetty-localhost-40623-hadoop-hdfs-3_4_1-tests_jar-_-any-1037876770983068202/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:45:10,770 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:40623} 2024-11-19T12:45:10,770 INFO [Time-limited test {}] server.Server(415): Started @4468ms 2024-11-19T12:45:10,774 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T12:45:11,773 WARN [Thread-98 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data1/current/BP-509242514-172.17.0.2-1732020308604/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:11,773 WARN [Thread-100 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data3/current/BP-509242514-172.17.0.2-1732020308604/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:11,773 WARN [Thread-99 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data2/current/BP-509242514-172.17.0.2-1732020308604/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:11,774 WARN [Thread-101 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data4/current/BP-509242514-172.17.0.2-1732020308604/current, will proceed with Du for space computation calculation, 2024-11-19T12:45:11,836 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:45:11,845 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:45:11,899 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b6ac41246c4cfbf with lease ID 0x25a13cd0841b1dc2: Processing first storage report for DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59 from datanode DatanodeRegistration(127.0.0.1:40091, datanodeUuid=ca0489c0-bffa-42bf-94d7-a005e65e8750, infoPort=46539, infoSecurePort=0, ipcPort=32959, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604) 2024-11-19T12:45:11,901 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b6ac41246c4cfbf with lease ID 0x25a13cd0841b1dc2: from storage DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59 node DatanodeRegistration(127.0.0.1:40091, datanodeUuid=ca0489c0-bffa-42bf-94d7-a005e65e8750, infoPort=46539, infoSecurePort=0, ipcPort=32959, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604), blocks: 0, hasStaleStorage: true, processing time: 2 msecs, invalidatedBlocks: 0 2024-11-19T12:45:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2cacabd940b62288 with lease ID 0x25a13cd0841b1dc3: Processing first storage report for DS-f9d7d828-f200-4799-a30f-cf8176737dbb from datanode DatanodeRegistration(127.0.0.1:42099, datanodeUuid=aa502fa1-9332-4ba0-883a-b866ff71d3be, infoPort=44229, infoSecurePort=0, ipcPort=40859, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604) 2024-11-19T12:45:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2cacabd940b62288 with lease ID 0x25a13cd0841b1dc3: from storage DS-f9d7d828-f200-4799-a30f-cf8176737dbb node DatanodeRegistration(127.0.0.1:42099, datanodeUuid=aa502fa1-9332-4ba0-883a-b866ff71d3be, infoPort=44229, infoSecurePort=0, ipcPort=40859, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:11,902 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8b6ac41246c4cfbf with lease ID 0x25a13cd0841b1dc2: Processing first storage report for DS-12411538-7b80-4680-a486-d894362430c1 from datanode DatanodeRegistration(127.0.0.1:40091, datanodeUuid=ca0489c0-bffa-42bf-94d7-a005e65e8750, infoPort=46539, infoSecurePort=0, ipcPort=32959, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604) 2024-11-19T12:45:11,903 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8b6ac41246c4cfbf with lease ID 0x25a13cd0841b1dc2: from storage DS-12411538-7b80-4680-a486-d894362430c1 node DatanodeRegistration(127.0.0.1:40091, datanodeUuid=ca0489c0-bffa-42bf-94d7-a005e65e8750, infoPort=46539, infoSecurePort=0, ipcPort=32959, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:45:11,903 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2cacabd940b62288 with lease ID 0x25a13cd0841b1dc3: Processing first storage report for DS-d9f8f10f-c7f9-4af7-aa16-f41c383862e4 from datanode DatanodeRegistration(127.0.0.1:42099, datanodeUuid=aa502fa1-9332-4ba0-883a-b866ff71d3be, infoPort=44229, infoSecurePort=0, ipcPort=40859, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604) 2024-11-19T12:45:11,903 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x2cacabd940b62288 with lease ID 0x25a13cd0841b1dc3: from storage DS-d9f8f10f-c7f9-4af7-aa16-f41c383862e4 node DatanodeRegistration(127.0.0.1:42099, datanodeUuid=aa502fa1-9332-4ba0-883a-b866ff71d3be, infoPort=44229, infoSecurePort=0, ipcPort=40859, storageInfo=lv=-57;cid=testClusterID;nsid=802649033;c=1732020308604), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:45:11,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e 2024-11-19T12:45:12,031 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/zookeeper_0, clientPort=60987, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:45:12,041 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=60987 2024-11-19T12:45:12,055 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:12,059 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:12,324 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:45:12,326 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:45:12,735 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc with version=8 2024-11-19T12:45:12,736 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:45:12,821 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-19T12:45:13,105 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:13,120 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,121 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,127 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:13,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,128 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:13,317 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:45:13,378 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-19T12:45:13,389 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-19T12:45:13,394 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:13,422 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 20836 (auto-detected) 2024-11-19T12:45:13,423 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-19T12:45:13,441 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39801 2024-11-19T12:45:13,461 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39801 connecting to ZooKeeper ensemble=127.0.0.1:60987 2024-11-19T12:45:13,586 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398010x0, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:13,588 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39801-0x101546a2a8f0000 connected 2024-11-19T12:45:13,673 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:13,674 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:13,683 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:13,687 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc, hbase.cluster.distributed=false 2024-11-19T12:45:13,707 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:13,711 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39801 
2024-11-19T12:45:13,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39801 2024-11-19T12:45:13,712 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39801 2024-11-19T12:45:13,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39801 2024-11-19T12:45:13,713 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39801 2024-11-19T12:45:13,820 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:45:13,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,822 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:45:13,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:45:13,822 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:45:13,825 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:45:13,827 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:45:13,829 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39285 2024-11-19T12:45:13,831 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39285 connecting to ZooKeeper ensemble=127.0.0.1:60987 2024-11-19T12:45:13,832 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:13,836 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:13,854 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:392850x0, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:45:13,855 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:392850x0, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:45:13,855 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:39285-0x101546a2a8f0001 connected 2024-11-19T12:45:13,859 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:45:13,866 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:45:13,869 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:45:13,874 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:45:13,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39285 2024-11-19T12:45:13,875 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39285 2024-11-19T12:45:13,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39285 2024-11-19T12:45:13,876 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39285 2024-11-19T12:45:13,877 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39285 2024-11-19T12:45:13,890 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:39801 2024-11-19T12:45:13,891 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,39801,1732020312906 2024-11-19T12:45:13,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:13,905 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:13,906 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,39801,1732020312906 2024-11-19T12:45:13,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:45:13,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:13,936 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:13,937 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:45:13,938 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,39801,1732020312906 from backup master directory 2024-11-19T12:45:13,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:13,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,39801,1732020312906 2024-11-19T12:45:13,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:45:13,947 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:45:13,947 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,39801,1732020312906 2024-11-19T12:45:13,949 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-19T12:45:13,951 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-19T12:45:14,000 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase.id] with ID: d41cf45c-0202-48d5-8785-a1cb32c15fb9 2024-11-19T12:45:14,001 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/.tmp/hbase.id 2024-11-19T12:45:14,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:45:14,013 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:45:14,014 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/.tmp/hbase.id]:[hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase.id] 2024-11-19T12:45:14,062 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:14,068 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-19T12:45:14,092 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 22ms. 2024-11-19T12:45:14,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,111 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:45:14,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:45:14,147 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:45:14,149 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:45:14,155 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:14,186 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:45:14,187 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:45:14,202 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store 2024-11-19T12:45:14,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:45:14,219 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:45:14,225 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-19T12:45:14,229 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:14,230 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:45:14,231 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:14,231 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:14,233 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:45:14,233 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:45:14,234 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:45:14,235 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020314230Disabling compacts and flushes for region at 1732020314230Disabling writes for close at 1732020314233 (+3 ms)Writing region close event to WAL at 1732020314234 (+1 ms)Closed at 1732020314234 2024-11-19T12:45:14,238 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/.initializing 2024-11-19T12:45:14,238 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/WALs/aba5a916dfea,39801,1732020312906 2024-11-19T12:45:14,260 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39801%2C1732020312906, suffix=, logDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/WALs/aba5a916dfea,39801,1732020312906, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/oldWALs, maxLogs=10 2024-11-19T12:45:14,270 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39801%2C1732020312906.1732020314266 2024-11-19T12:45:14,292 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/WALs/aba5a916dfea,39801,1732020312906/aba5a916dfea%2C39801%2C1732020312906.1732020314266 2024-11-19T12:45:14,299 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46539:46539),(127.0.0.1/127.0.0.1:44229:44229)] 2024-11-19T12:45:14,301 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:14,301 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:14,304 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,305 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,339 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,361 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:45:14,365 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,367 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:14,368 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,371 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:45:14,371 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:14,372 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,375 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: 
max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:45:14,375 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:14,376 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,378 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:45:14,379 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,380 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:14,380 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,384 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,385 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,390 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,390 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,394 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:45:14,398 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:45:14,402 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:14,403 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=824156, jitterRate=0.04796920716762543}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:45:14,411 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020314317Initializing all the Stores at 1732020314319 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020314319Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020314320 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020314320Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020314320Cleaning up temporary data from old regions at 1732020314391 (+71 ms)Region opened successfully at 1732020314410 (+19 ms) 2024-11-19T12:45:14,412 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:45:14,440 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@41d55699, compressor=null, 
tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:14,465 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:45:14,475 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:45:14,475 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:45:14,478 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:45:14,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-19T12:45:14,485 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 4 msec 2024-11-19T12:45:14,485 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:45:14,509 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:45:14,518 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:45:14,554 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:45:14,557 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:45:14,560 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:45:14,571 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:45:14,574 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:45:14,578 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:45:14,587 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:45:14,589 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily 
an error) 2024-11-19T12:45:14,596 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:45:14,615 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:45:14,620 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:45:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:45:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,629 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,632 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,39801,1732020312906, sessionid=0x101546a2a8f0000, setting cluster-up flag (Was=false) 2024-11-19T12:45:14,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,662 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,687 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:45:14,689 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,39801,1732020312906 2024-11-19T12:45:14,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,712 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:14,737 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:45:14,740 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,39801,1732020312906 2024-11-19T12:45:14,747 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:45:14,781 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(746): ClusterId : d41cf45c-0202-48d5-8785-a1cb32c15fb9 2024-11-19T12:45:14,784 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:45:14,811 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:14,813 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:45:14,813 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:45:14,820 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:45:14,822 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:45:14,822 DEBUG [RS:0;aba5a916dfea:39285 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3a2f0fa7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:45:14,826 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
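
Editor's note (not part of the log): the StochasticLoadBalancer entry above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000. A hedged sketch of how such knobs are usually set through the HBase Configuration follows; the "hbase.master.balancer.stochastic.*" property names are an assumption from memory, so verify them against the HBase version in use.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class BalancerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed property names, mirroring the values printed in the log above.
    conf.setInt("hbase.master.balancer.stochastic.maxSteps", 1_000_000);
    conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
    conf.setInt("hbase.master.balancer.stochastic.stepsPerRegion", 800);
    conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
    return conf;
  }
}
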
2024-11-19T12:45:14,831 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,39801,1732020312906 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:45:14,837 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:39285 2024-11-19T12:45:14,838 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:14,838 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:14,838 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:14,838 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:45:14,839 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:45:14,839 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:14,839 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:14,839 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:14,840 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:45:14,840 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:45:14,840 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(832): About to register with Master. 
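
Editor's note (not part of the log): each "Starting executor service name=..., corePoolSize=N, maxPoolSize=N" line above corresponds to a small fixed-size thread pool dedicated to one event type. As a rough JDK analogy only (not HBase's own ExecutorService class), the pool shape those lines describe looks like this:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventPoolSketch {
  // Analogy only: core == max, idle core threads may time out,
  // matching e.g. MASTER_OPEN_REGION with corePoolSize=5, maxPoolSize=5.
  public static ThreadPoolExecutor newPool(int size) {
    ThreadPoolExecutor pool = new ThreadPoolExecutor(
        size, size, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
    pool.allowCoreThreadTimeOut(true);
    return pool;
  }
}
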
2024-11-19T12:45:14,842 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,39801,1732020312906 with port=39285, startcode=1732020313788 2024-11-19T12:45:14,844 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020344844 2024-11-19T12:45:14,845 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:14,846 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:45:14,846 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:45:14,847 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:45:14,851 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:45:14,852 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:45:14,852 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,852 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:45:14,852 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:45:14,852 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:45:14,855 DEBUG [RS:0;aba5a916dfea:39285 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:45:14,854 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:14,858 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:45:14,859 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:45:14,860 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:45:14,862 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:45:14,862 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:45:14,867 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020314863,5,FailOnTimeoutGroup] 2024-11-19T12:45:14,867 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020314867,5,FailOnTimeoutGroup] 2024-11-19T12:45:14,867 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:14,868 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:45:14,869 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:14,869 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
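
Editor's note (not part of the log): the "Chore ScheduledChore name=..., period=..., is enabled" lines come from the master registering its cleaner chores with a ChoreService. A minimal sketch of that pattern is below; ScheduledChore, ChoreService and Stoppable are real HBase classes, while the chore name, period and body are purely illustrative.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Hypothetical chore running every 600000 ms, like the LogsCleaner entry above.
    ScheduledChore chore = new ScheduledChore("ExampleCleaner", stopper, 600000) {
      @Override protected void chore() {
        // periodic cleanup work would go here
      }
    };
    new ChoreService("example").scheduleChore(chore);
  }
}
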
2024-11-19T12:45:14,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:45:14,870 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:45:14,873 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:45:14,873 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc 2024-11-19T12:45:14,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:45:14,887 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:45:14,889 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:14,892 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:45:14,894 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:45:14,895 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:14,896 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:45:14,898 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:45:14,899 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,900 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:14,901 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:45:14,904 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:45:14,904 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:14,906 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:45:14,908 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:45:14,909 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:14,910 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:14,910 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:45:14,911 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740 2024-11-19T12:45:14,912 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740 2024-11-19T12:45:14,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:45:14,915 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:45:14,916 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
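
Editor's note (not part of the log): the FlushLargeStoresPolicy entry notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor, so the region falls back to the memstore flush size divided by the number of families. If one did want to pin that bound per table, a sketch of the usual route is to set the property on the table descriptor; the key name is copied from the log line itself, while the table, family and value are illustrative.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushBoundSketch {
  public static TableDescriptor withFlushLowerBound() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        // Key taken from the log; 16 MB is just an example value.
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16 * 1024 * 1024))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("cf"))
        .build();
  }
}
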
2024-11-19T12:45:14,918 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:45:14,922 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:14,923 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=857881, jitterRate=0.09085269272327423}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:45:14,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020314889Initializing all the Stores at 1732020314891 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020314891Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020314891Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020314891Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020314892 (+1 ms)Cleaning up temporary data from old regions at 1732020314915 (+23 ms)Region opened successfully at 1732020314928 (+13 ms) 2024-11-19T12:45:14,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:45:14,929 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:45:14,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:45:14,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:45:14,929 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:45:14,932 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:45:14,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020314929Disabling compacts and flushes for region at 1732020314929Disabling writes for close at 1732020314929Writing 
region close event to WAL at 1732020314931 (+2 ms)Closed at 1732020314932 (+1 ms) 2024-11-19T12:45:14,933 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52381, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:45:14,936 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:14,936 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:45:14,940 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39801 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,39285,1732020313788 2024-11-19T12:45:14,942 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:45:14,943 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39801 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:14,951 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:45:14,954 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:45:14,960 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc 2024-11-19T12:45:14,960 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44083 2024-11-19T12:45:14,960 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:45:14,979 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:45:14,980 DEBUG [RS:0;aba5a916dfea:39285 {}] zookeeper.ZKUtil(111): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,39285,1732020313788 2024-11-19T12:45:14,980 WARN [RS:0;aba5a916dfea:39285 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
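
Editor's note (not part of the log): the RegionServerTracker entry above fires when the regionserver's ephemeral znode appears under /hbase/rs. The same observation can be made with the plain ZooKeeper client; a minimal sketch follows, assuming the quorum address and base znode visible in this log (127.0.0.1:60987, /hbase).

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class RsWatchSketch {
  public static void main(String[] args) throws Exception {
    // Connection string taken from the log; the default watcher just prints events.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:60987", 30_000,
        (WatchedEvent e) -> System.out.println("event: " + e));
    // One-shot watch on the children of /hbase/rs (the live regionserver list).
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("live regionservers: " + servers);
    zk.close();
  }
}
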
2024-11-19T12:45:14,980 INFO [RS:0;aba5a916dfea:39285 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:14,980 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788 2024-11-19T12:45:14,983 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,39285,1732020313788] 2024-11-19T12:45:15,007 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:45:15,023 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:45:15,027 INFO [RS:0;aba5a916dfea:39285 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:45:15,027 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,028 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:45:15,033 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:45:15,035 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,035 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,035 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,035 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,036 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T12:45:15,037 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,037 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,037 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:45:15,037 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:15,037 DEBUG [RS:0;aba5a916dfea:39285 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:45:15,038 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,038 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,038 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,038 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,039 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,039 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39285,1732020313788-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:15,056 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:45:15,058 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39285,1732020313788-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,058 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,058 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.Replication(171): aba5a916dfea,39285,1732020313788 started 2024-11-19T12:45:15,075 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
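
Editor's note (not part of the log): globalMemStoreLimit=880 M and the compaction throughput bounds above (100 MB/s upper, 50 MB/s lower) are derived from the regionserver heap and its configuration. A hedged Configuration sketch follows; "hbase.regionserver.global.memstore.size" is a standard key, while the two compaction-throughput key names are my best recollection of what PressureAwareCompactionThroughputController reads and should be verified.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RsThroughputSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // Fraction of the heap usable by all memstores (the global memstore limit).
    conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
    // Assumed keys for the pressure-aware compaction throughput bounds (bytes/sec).
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    return conf;
  }
}
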
2024-11-19T12:45:15,075 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,39285,1732020313788, RpcServer on aba5a916dfea/172.17.0.2:39285, sessionid=0x101546a2a8f0001 2024-11-19T12:45:15,076 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:45:15,076 DEBUG [RS:0;aba5a916dfea:39285 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,39285,1732020313788 2024-11-19T12:45:15,077 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39285,1732020313788' 2024-11-19T12:45:15,077 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:45:15,078 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:45:15,078 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:45:15,078 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:45:15,079 DEBUG [RS:0;aba5a916dfea:39285 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,39285,1732020313788 2024-11-19T12:45:15,079 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39285,1732020313788' 2024-11-19T12:45:15,079 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:45:15,080 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:45:15,080 DEBUG [RS:0;aba5a916dfea:39285 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:45:15,080 INFO [RS:0;aba5a916dfea:39285 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:45:15,080 INFO [RS:0;aba5a916dfea:39285 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:45:15,105 WARN [aba5a916dfea:39801 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 
2024-11-19T12:45:15,191 INFO [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39285%2C1732020313788, suffix=, logDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs, maxLogs=32 2024-11-19T12:45:15,195 INFO [RS:0;aba5a916dfea:39285 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020315195 2024-11-19T12:45:15,203 INFO [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020315195 2024-11-19T12:45:15,205 DEBUG [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46539:46539),(127.0.0.1/127.0.0.1:44229:44229)] 2024-11-19T12:45:15,358 DEBUG [aba5a916dfea:39801 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:45:15,371 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:15,376 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39285,1732020313788, state=OPENING 2024-11-19T12:45:15,404 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:45:15,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:15,412 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:45:15,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:15,414 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:15,417 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:45:15,420 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39285,1732020313788}] 2024-11-19T12:45:15,603 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:45:15,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33255, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:45:15,617 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:45:15,618 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:45:15,622 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39285%2C1732020313788.meta, suffix=.meta, logDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788, archiveDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs, maxLogs=32 2024-11-19T12:45:15,625 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.meta.1732020315625.meta 2024-11-19T12:45:15,632 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.meta.1732020315625.meta 2024-11-19T12:45:15,634 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:45:15,636 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:15,638 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:45:15,640 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:45:15,644 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
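
The two "WAL configuration" entries (12:45:15,191 and 12:45:15,622) report blocksize=256 MB, rollsize=128 MB and maxLogs=32. A sketch of how those knobs are usually set explicitly, assuming the standard property names hbase.regionserver.hlog.blocksize, hbase.regionserver.logroll.multiplier and hbase.regionserver.maxlogs; whether this test relies on defaults or overrides them is not visible in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalRollConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size; the roll size is blocksize * multiplier (256 MB * 0.5 = 128 MB above).
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Upper bound on un-archived WAL files before flushes are forced (maxLogs=32 above).
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.maxlogs"));
      }
    }
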
2024-11-19T12:45:15,648 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:45:15,649 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:15,649 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:45:15,649 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:45:15,652 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:45:15,654 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:45:15,654 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:15,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:15,655 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:45:15,657 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:45:15,657 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:15,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:15,658 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:45:15,660 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:45:15,660 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:15,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:45:15,661 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:45:15,663 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:45:15,663 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:15,664 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T12:45:15,664 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:45:15,666 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740 2024-11-19T12:45:15,668 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740 2024-11-19T12:45:15,671 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:45:15,671 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:45:15,672 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:45:15,675 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:45:15,676 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=719652, jitterRate=-0.0849158763885498}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:45:15,676 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:45:15,677 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020315650Writing region info on filesystem at 1732020315650Initializing all the Stores at 1732020315651 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020315651Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020315652 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020315652Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020315652Cleaning up temporary data from old regions at 1732020315671 (+19 ms)Running coprocessor post-open hooks at 1732020315676 (+5 ms)Region opened successfully at 1732020315677 (+1 ms) 2024-11-19T12:45:15,683 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020315592 2024-11-19T12:45:15,695 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:45:15,695 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:45:15,697 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:15,700 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39285,1732020313788, state=OPEN 2024-11-19T12:45:15,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:15,766 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:45:15,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:15,767 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:45:15,767 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:15,773 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:45:15,774 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39285,1732020313788 in 348 msec 2024-11-19T12:45:15,781 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:45:15,782 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 834 msec 2024-11-19T12:45:15,783 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:45:15,784 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:45:15,801 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:45:15,802 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39285,1732020313788, seqNum=-1] 2024-11-19T12:45:15,819 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:45:15,821 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55827, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:45:15,842 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.0660 sec 2024-11-19T12:45:15,842 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020315842, completionTime=-1 2024-11-19T12:45:15,846 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:45:15,846 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:45:15,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:45:15,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020375875 2024-11-19T12:45:15,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020435875 2024-11-19T12:45:15,875 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 29 msec 2024-11-19T12:45:15,878 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,879 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,879 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,881 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:39801, period=300000, unit=MILLISECONDS is enabled. 
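
InitMetaProcedure has just created the default and hbase namespaces, after which master startup completes. A minimal client-side sketch for listing and creating namespaces through the Admin API; the demo_ns name is illustrative only.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class NamespaceSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // The two namespaces InitMetaProcedure created above are always present.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println(ns.getName());  // prints "default" and "hbase" on a fresh cluster
          }
          // Creating and removing an additional, purely illustrative namespace.
          admin.createNamespace(NamespaceDescriptor.create("demo_ns").build());
          admin.deleteNamespace("demo_ns");
        }
      }
    }
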
2024-11-19T12:45:15,881 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,882 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:45:15,887 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:45:15,907 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.960sec 2024-11-19T12:45:15,908 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:45:15,909 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:45:15,911 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:45:15,911 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:45:15,911 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:45:15,912 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:45:15,913 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:45:15,922 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:45:15,922 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:45:15,923 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39801,1732020312906-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
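
With master initialization complete (1.960sec above), the cluster is queryable from a client. A small sketch that reads the master name and live region servers via ClusterMetrics; in a one-server minicluster like this it would list a single region server. The class name is illustrative.

    import org.apache.hadoop.hbase.ClusterMetrics;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClusterStatusSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          ClusterMetrics metrics = admin.getClusterMetrics();
          System.out.println("master: " + metrics.getMasterName());
          for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
            System.out.println("region server: " + rs);
          }
        }
      }
    }
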
2024-11-19T12:45:16,016 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6a88365d, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:16,018 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-19T12:45:16,019 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-19T12:45:16,023 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,39801,-1 for getting cluster id 2024-11-19T12:45:16,026 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:45:16,035 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'd41cf45c-0202-48d5-8785-a1cb32c15fb9' 2024-11-19T12:45:16,041 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:45:16,042 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "d41cf45c-0202-48d5-8785-a1cb32c15fb9" 2024-11-19T12:45:16,044 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27bbb91a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:16,044 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,39801,-1] 2024-11-19T12:45:16,047 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:45:16,049 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:45:16,050 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58716, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:45:16,054 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1637bc3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:45:16,055 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:45:16,064 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39285,1732020313788, seqNum=-1] 2024-11-19T12:45:16,064 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:45:16,067 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:45:16,086 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=aba5a916dfea,39801,1732020312906 2024-11-19T12:45:16,086 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:45:16,092 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:45:16,095 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:45:16,100 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,39801,1732020312906 2024-11-19T12:45:16,102 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@66649c1a 2024-11-19T12:45:16,104 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:45:16,106 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58732, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:45:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T12:45:16,108 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
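
The two warnings name hbase.hregion.max.filesize (786432) and hbase.hregion.memstore.flush.size (8192); such tiny values are presumably deliberate here so that flushes and log rolls happen quickly during the test. A sketch of the two places TableDescriptorChecker inspects, cluster configuration and the table descriptor; the table name reuses the one from this log, but the snippet itself is illustrative.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class SmallRegionSizesSketch {
      public static void main(String[] args) {
        // Cluster-wide defaults, using the property names from the warnings above.
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.hregion.max.filesize", 768 * 1024);       // 786432 bytes
        conf.setLong("hbase.hregion.memstore.flush.size", 8 * 1024);  // 8192 bytes

        // Or per table, via the descriptor, which the same check also inspects.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setMaxFileSize(768 * 1024)
            .setMemStoreFlushSize(8 * 1024)
            .build();
        System.out.println(td);
      }
    }
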
2024-11-19T12:45:16,111 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:45:16,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-19T12:45:16,121 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:45:16,123 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-19T12:45:16,123 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:16,125 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:45:16,127 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:16,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741835_1011 (size=389) 2024-11-19T12:45:16,154 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741835_1011 (size=389) 2024-11-19T12:45:16,158 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0bd843196e5e072ea49d96974d03f9a9, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc 2024-11-19T12:45:16,169 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741836_1012 (size=72) 2024-11-19T12:45:16,170 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741836_1012 (size=72) 2024-11-19T12:45:16,170 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:16,171 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0bd843196e5e072ea49d96974d03f9a9, disabling compactions & flushes 2024-11-19T12:45:16,171 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,171 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,171 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. after waiting 0 ms 2024-11-19T12:45:16,171 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,171 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,171 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0bd843196e5e072ea49d96974d03f9a9: Waiting for close lock at 1732020316170Disabling compacts and flushes for region at 1732020316170Disabling writes for close at 1732020316171 (+1 ms)Writing region close event to WAL at 1732020316171Closed at 1732020316171 2024-11-19T12:45:16,173 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:45:16,177 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732020316173"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020316173"}]},"ts":"1732020316173"} 2024-11-19T12:45:16,182 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
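
The create request at 12:45:16,111 is logged in HBase shell syntax. The equivalent through the Java Admin API, using the attributes visible in the logged descriptor (one version, ROW bloom filter, 64 KB blocks), would look roughly like the sketch below, assuming a connection built from the ambient configuration.

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class CreateTableSketch {
      public static void main(String[] args) throws Exception {
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          admin.createTable(TableDescriptorBuilder
              .newBuilder(TableName.valueOf("TestLogRolling-testSlowSyncLogRolling"))
              .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                  .setMaxVersions(1)                  // VERSIONS => '1'
                  .setBloomFilterType(BloomType.ROW)  // BLOOMFILTER => 'ROW'
                  .setBlocksize(64 * 1024)            // BLOCKSIZE => '65536 B (64KB)'
                  .build())
              .build());
          // createTable blocks until the CreateTableProcedure (pid=4 above) has finished.
        }
      }
    }
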
2024-11-19T12:45:16,184 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:45:16,187 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020316184"}]},"ts":"1732020316184"} 2024-11-19T12:45:16,191 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-19T12:45:16,192 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0bd843196e5e072ea49d96974d03f9a9, ASSIGN}] 2024-11-19T12:45:16,195 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0bd843196e5e072ea49d96974d03f9a9, ASSIGN 2024-11-19T12:45:16,197 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0bd843196e5e072ea49d96974d03f9a9, ASSIGN; state=OFFLINE, location=aba5a916dfea,39285,1732020313788; forceNewPlan=false, retain=false 2024-11-19T12:45:16,350 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0bd843196e5e072ea49d96974d03f9a9, regionState=OPENING, regionLocation=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:16,358 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0bd843196e5e072ea49d96974d03f9a9, ASSIGN because future has completed 2024-11-19T12:45:16,359 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bd843196e5e072ea49d96974d03f9a9, server=aba5a916dfea,39285,1732020313788}] 2024-11-19T12:45:16,523 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 
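
pid=5 and pid=6 are the assignment and open procedures for the new region. On the client side this shows up as the repeated "Checking to see if procedure is done pid=4" polling later in the log; with the synchronous Admin API an equivalent wait could be written as below, though whether it is needed depends on which create variant the caller used (createTable blocks, createTableAsync does not).

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class WaitForAssignmentSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             Admin admin = conn.getAdmin()) {
          // Poll until the ASSIGN/OpenRegionProcedure chain above has put every region online.
          while (!admin.isTableAvailable(table)) {
            Thread.sleep(100);
          }
          System.out.println(table + " is available");
        }
      }
    }
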
2024-11-19T12:45:16,523 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0bd843196e5e072ea49d96974d03f9a9, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:45:16,524 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,524 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:45:16,524 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,524 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,527 INFO [StoreOpener-0bd843196e5e072ea49d96974d03f9a9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,530 INFO [StoreOpener-0bd843196e5e072ea49d96974d03f9a9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0bd843196e5e072ea49d96974d03f9a9 columnFamilyName info 2024-11-19T12:45:16,531 DEBUG [StoreOpener-0bd843196e5e072ea49d96974d03f9a9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:45:16,532 INFO [StoreOpener-0bd843196e5e072ea49d96974d03f9a9-1 {}] regionserver.HStore(327): Store=0bd843196e5e072ea49d96974d03f9a9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:45:16,532 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,534 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,535 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,535 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,536 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,538 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,541 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:45:16,542 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0bd843196e5e072ea49d96974d03f9a9; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=688383, jitterRate=-0.1246768981218338}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:45:16,542 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:16,543 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0bd843196e5e072ea49d96974d03f9a9: Running coprocessor pre-open hook at 1732020316524Writing region info on filesystem at 1732020316524Initializing all the Stores at 1732020316527 (+3 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020316527Cleaning up temporary data from old regions at 1732020316536 (+9 ms)Running coprocessor post-open hooks at 1732020316542 (+6 ms)Region opened successfully at 1732020316543 (+1 ms) 2024-11-19T12:45:16,545 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9., pid=6, masterSystemTime=1732020316514 2024-11-19T12:45:16,549 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,549 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:45:16,550 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0bd843196e5e072ea49d96974d03f9a9, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,39285,1732020313788 2024-11-19T12:45:16,554 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0bd843196e5e072ea49d96974d03f9a9, server=aba5a916dfea,39285,1732020313788 because future has completed 2024-11-19T12:45:16,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:45:16,561 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0bd843196e5e072ea49d96974d03f9a9, server=aba5a916dfea,39285,1732020313788 in 198 msec 2024-11-19T12:45:16,565 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:45:16,566 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=0bd843196e5e072ea49d96974d03f9a9, ASSIGN in 369 msec 2024-11-19T12:45:16,567 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:45:16,567 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020316567"}]},"ts":"1732020316567"} 2024-11-19T12:45:16,571 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-19T12:45:16,573 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:45:16,576 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 459 msec 2024-11-19T12:45:21,160 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-19T12:45:21,222 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:45:21,223 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-19T12:45:23,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:45:23,373 INFO [HBase-Metrics2-1 {}] 
impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:23,375 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T12:45:23,375 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:23,376 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:45:23,376 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:45:23,377 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:45:23,377 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T12:45:26,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39801 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:45:26,140 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-19T12:45:26,144 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-19T12:45:26,149 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-19T12:45:26,150 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 
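
Once the CREATE operation completes, the test scans hbase:meta (12:45:26,144) and finds one region for the table. The same lookup is available to any client through RegionLocator; a sketch, assuming the table from this log already exists:

    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;

    public class ListRegionsSketch {
      public static void main(String[] args) throws Exception {
        TableName table = TableName.valueOf("TestLogRolling-testSlowSyncLogRolling");
        try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
             RegionLocator locator = conn.getRegionLocator(table)) {
          // Backed by a scan of hbase:meta, like the one logged at 12:45:26,144.
          for (HRegionLocation loc : locator.getAllRegionLocations()) {
            System.out.println(loc.getRegion().getRegionNameAsString()
                + " on " + loc.getServerName());
          }
        }
      }
    }
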
2024-11-19T12:45:26,151 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020326151 2024-11-19T12:45:26,159 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:26,159 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:26,159 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:26,159 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:26,159 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:26,160 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020315195 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020326151 2024-11-19T12:45:26,161 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:45:26,161 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020315195 is not closed yet, will try archiving it next time 2024-11-19T12:45:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741833_1009 (size=451) 2024-11-19T12:45:26,163 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741833_1009 (size=451) 2024-11-19T12:45:26,164 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020315195 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020315195 2024-11-19T12:45:26,170 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9., hostname=aba5a916dfea,39285,1732020313788, seqNum=2] 2024-11-19T12:45:38,215 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:38,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0bd843196e5e072ea49d96974d03f9a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:45:38,275 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/91f69dc4b49a48ada0ef4b0327ee0377 is 1080, key is row0001/info:/1732020326173/Put/seqid=0 2024-11-19T12:45:38,286 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741838_1014 (size=12509) 2024-11-19T12:45:38,286 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741838_1014 (size=12509) 2024-11-19T12:45:38,287 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/91f69dc4b49a48ada0ef4b0327ee0377 2024-11-19T12:45:38,336 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/91f69dc4b49a48ada0ef4b0327ee0377 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377 2024-11-19T12:45:38,347 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T12:45:38,355 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 135ms, sequenceid=11, compaction requested=false 2024-11-19T12:45:38,356 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0bd843196e5e072ea49d96974d03f9a9: 2024-11-19T12:45:41,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-19T12:45:46,238 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020346237 2024-11-19T12:45:46,455 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 212 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:45:46,456 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:46,456 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:46,456 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:46,457 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:46,457 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:46,457 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020326151 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020346237 2024-11-19T12:45:46,458 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46539:46539),(127.0.0.1/127.0.0.1:44229:44229)] 2024-11-19T12:45:46,459 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020326151 is not closed yet, will try archiving it next time 2024-11-19T12:45:46,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741837_1013 (size=12399) 2024-11-19T12:45:46,460 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741837_1013 (size=12399) 2024-11-19T12:45:46,663 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:48,872 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:51,080 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:53,285 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:53,286 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:45:53,286 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0bd843196e5e072ea49d96974d03f9a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:45:53,489 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:53,494 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/d21a21200c2d48d691e83eae304f30d2 is 1080, key is row0008/info:/1732020340219/Put/seqid=0 2024-11-19T12:45:53,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741840_1016 (size=12509) 2024-11-19T12:45:53,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741840_1016 (size=12509) 2024-11-19T12:45:53,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/d21a21200c2d48d691e83eae304f30d2 2024-11-19T12:45:53,513 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/d21a21200c2d48d691e83eae304f30d2 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2 2024-11-19T12:45:53,523 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2, entries=7, sequenceid=21, filesize=12.2 K 2024-11-19T12:45:53,725 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:53,725 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 
439ms, sequenceid=21, compaction requested=false 2024-11-19T12:45:53,725 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0bd843196e5e072ea49d96974d03f9a9: 2024-11-19T12:45:53,725 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-19T12:45:53,726 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:45:53,727 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377 because midkey is the same as first or last row 2024-11-19T12:45:55,492 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:56,528 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T12:45:56,528 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T12:45:57,697 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:57,699 WARN [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:57,700 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C39285%2C1732020313788:(num 1732020346237) roll requested 2024-11-19T12:45:57,701 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020357701 2024-11-19T12:45:57,912 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 208 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:45:57,913 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:57,913 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:57,913 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:57,913 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:45:57,913 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-19T12:45:57,913 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020346237 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020357701 2024-11-19T12:45:57,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741839_1015 (size=7739) 2024-11-19T12:45:57,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741839_1015 (size=7739) 2024-11-19T12:45:57,917 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46539:46539),(127.0.0.1/127.0.0.1:44229:44229)] 2024-11-19T12:45:57,918 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020346237 is not closed yet, will try archiving it next time 2024-11-19T12:45:57,918 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020326151 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020326151 2024-11-19T12:45:59,901 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:01,524 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0bd843196e5e072ea49d96974d03f9a9, had cached 0 bytes from a total of 25018 2024-11-19T12:46:02,106 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:04,310 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:06,514 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], 
DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:08,517 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:46:08,518 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020368518 2024-11-19T12:46:11,950 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:46:13,533 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5008 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:13,535 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5008 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK], DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK]] 2024-11-19T12:46:13,536 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C39285%2C1732020313788:(num 1732020368518) roll requested 2024-11-19T12:46:13,536 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:13,536 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:13,536 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:13,536 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:13,536 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:13,537 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020357701 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020368518 2024-11-19T12:46:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741841_1017 (size=4753) 2024-11-19T12:46:13,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741841_1017 (size=4753) 2024-11-19T12:46:13,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:46:13,548 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020357701 is not closed yet, will try archiving it next time 2024-11-19T12:46:13,549 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020373549 2024-11-19T12:46:18,553 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:18,553 WARN [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:18,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:46:18,553 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0bd843196e5e072ea49d96974d03f9a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:46:18,562 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5007 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:18,562 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5007 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:20,554 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:46:23,557 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5002 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:23,557 WARN [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5002 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:23,558 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:23,558 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:23,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:23,559 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:23,559 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:23,560 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020368518 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020373549 2024-11-19T12:46:23,562 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:46:23,562 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020368518 is not closed yet, will try archiving it next time 2024-11-19T12:46:23,562 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C39285%2C1732020313788:(num 1732020373549) roll requested 2024-11-19T12:46:23,563 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020383562 2024-11-19T12:46:23,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741842_1018 (size=1569) 2024-11-19T12:46:23,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741842_1018 (size=1569) 2024-11-19T12:46:23,565 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/479e9ba6f617449a8d30c04b1f5977e7 is 1080, key is row0015/info:/1732020355290/Put/seqid=0 2024-11-19T12:46:23,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741844_1020 (size=12509) 2024-11-19T12:46:23,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741844_1020 (size=12509) 2024-11-19T12:46:23,575 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/479e9ba6f617449a8d30c04b1f5977e7 2024-11-19T12:46:23,586 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/479e9ba6f617449a8d30c04b1f5977e7 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7 2024-11-19T12:46:23,596 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7, entries=7, sequenceid=31, filesize=12.2 K 2024-11-19T12:46:28,570 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:28,570 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:28,598 INFO [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:28,598 WARN [FSHLog-0-hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc-prefix:aba5a916dfea,39285,1732020313788 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:42099,DS-f9d7d828-f200-4799-a30f-cf8176737dbb,DISK], DatanodeInfoWithStorage[127.0.0.1:40091,DS-45c1fe17-96b3-48e2-a409-50d6ffc6ca59,DISK]] 2024-11-19T12:46:28,598 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 10045ms, sequenceid=31, compaction requested=true 2024-11-19T12:46:28,599 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,599 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0bd843196e5e072ea49d96974d03f9a9: 2024-11-19T12:46:28,599 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,599 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,599 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-19T12:46:28,599 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:46:28,599 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,599 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,599 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377 because midkey is the same as first or last row 2024-11-19T12:46:28,599 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020373549 with entries=1, filesize=430 B; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020383562 2024-11-19T12:46:28,600 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: 
[(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:46:28,601 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020373549 is not closed yet, will try archiving it next time 2024-11-19T12:46:28,601 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 0bd843196e5e072ea49d96974d03f9a9:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:46:28,601 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020346237 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020346237 2024-11-19T12:46:28,601 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C39285%2C1732020313788:(num 1732020383562) roll requested 2024-11-19T12:46:28,601 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020388601 2024-11-19T12:46:28,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741843_1019 (size=438) 2024-11-19T12:46:28,603 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:46:28,603 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:46:28,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741843_1019 (size=438) 2024-11-19T12:46:28,604 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020357701 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020357701 2024-11-19T12:46:28,606 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020368518 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020368518 2024-11-19T12:46:28,607 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:46:28,607 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020373549 to 
hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020373549 2024-11-19T12:46:28,608 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HStore(1541): 0bd843196e5e072ea49d96974d03f9a9/info is initiating minor compaction (all files) 2024-11-19T12:46:28,609 INFO [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 0bd843196e5e072ea49d96974d03f9a9/info in TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:28,609 INFO [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7] into tmpdir=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp, totalSize=36.6 K 2024-11-19T12:46:28,611 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting 91f69dc4b49a48ada0ef4b0327ee0377, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732020326173 2024-11-19T12:46:28,612 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting d21a21200c2d48d691e83eae304f30d2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732020340219 2024-11-19T12:46:28,613 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] compactions.Compactor(225): Compacting 479e9ba6f617449a8d30c04b1f5977e7, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732020355290 2024-11-19T12:46:28,616 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,616 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,616 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,616 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,616 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,617 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020383562 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020388601 2024-11-19T12:46:28,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741845_1021 (size=93) 2024-11-19T12:46:28,619 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741845_1021 (size=93) 2024-11-19T12:46:28,620 INFO [WAL-Archive-0 {}] 
wal.AbstractFSWAL(968): Archiving hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020383562 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs/aba5a916dfea%2C39285%2C1732020313788.1732020383562 2024-11-19T12:46:28,632 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:46:28,632 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39285%2C1732020313788.1732020388632 2024-11-19T12:46:28,648 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,648 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,648 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:28,649 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020388601 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/WALs/aba5a916dfea,39285,1732020313788/aba5a916dfea%2C39285%2C1732020313788.1732020388632 2024-11-19T12:46:28,651 INFO [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 0bd843196e5e072ea49d96974d03f9a9#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:46:28,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741846_1022 (size=1258) 2024-11-19T12:46:28,652 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741846_1022 (size=1258) 2024-11-19T12:46:28,652 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/584a99061f1e46998fe66d8d872fbb79 is 1080, key is row0001/info:/1732020326173/Put/seqid=0 2024-11-19T12:46:28,658 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44229:44229),(127.0.0.1/127.0.0.1:46539:46539)] 2024-11-19T12:46:28,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741848_1024 (size=27710) 2024-11-19T12:46:28,666 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741848_1024 (size=27710) 2024-11-19T12:46:28,680 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/584a99061f1e46998fe66d8d872fbb79 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/584a99061f1e46998fe66d8d872fbb79 2024-11-19T12:46:28,698 INFO [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0bd843196e5e072ea49d96974d03f9a9/info of 0bd843196e5e072ea49d96974d03f9a9 into 584a99061f1e46998fe66d8d872fbb79(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:46:28,698 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 0bd843196e5e072ea49d96974d03f9a9: 2024-11-19T12:46:28,700 INFO [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9., storeName=0bd843196e5e072ea49d96974d03f9a9/info, priority=13, startTime=1732020388601; duration=0sec 2024-11-19T12:46:28,700 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T12:46:28,700 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/584a99061f1e46998fe66d8d872fbb79 because midkey is the same as first or last row 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/584a99061f1e46998fe66d8d872fbb79 because midkey is the same as first or last row 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-19T12:46:28,701 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:46:28,702 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/584a99061f1e46998fe66d8d872fbb79 because midkey is the same as first or last row 2024-11-19T12:46:28,702 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:46:28,702 DEBUG [RS:0;aba5a916dfea:39285-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 0bd843196e5e072ea49d96974d03f9a9:info 2024-11-19T12:46:40,674 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39285 {}] regionserver.HRegion(8855): Flush requested on 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:46:40,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 0bd843196e5e072ea49d96974d03f9a9 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:46:40,684 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/a5fdda12a3e14781beffcee2dcd483a4 is 1080, key is row0022/info:/1732020388634/Put/seqid=0 2024-11-19T12:46:40,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741849_1025 (size=12509) 2024-11-19T12:46:40,690 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741849_1025 (size=12509) 2024-11-19T12:46:40,691 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/a5fdda12a3e14781beffcee2dcd483a4 2024-11-19T12:46:40,700 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/a5fdda12a3e14781beffcee2dcd483a4 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/a5fdda12a3e14781beffcee2dcd483a4 2024-11-19T12:46:40,708 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/a5fdda12a3e14781beffcee2dcd483a4, entries=7, sequenceid=42, filesize=12.2 K 2024-11-19T12:46:40,710 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 36ms, sequenceid=42, compaction requested=false 2024-11-19T12:46:40,710 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 0bd843196e5e072ea49d96974d03f9a9: 2024-11-19T12:46:40,710 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-19T12:46:40,710 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:46:40,710 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/584a99061f1e46998fe66d8d872fbb79 because midkey is the same as first or last row 2024-11-19T12:46:41,951 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-19T12:46:46,525 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0bd843196e5e072ea49d96974d03f9a9, had cached 0 bytes from a total of 40219 2024-11-19T12:46:48,691 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:46:48,691 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T12:46:48,692 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 
2024-11-19T12:46:48,698 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:48,699 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:48,699 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:46:48,699 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:46:48,700 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=245640440, stopped=false 2024-11-19T12:46:48,700 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,39801,1732020312906 2024-11-19T12:46:48,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:48,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:48,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:48,758 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:46:48,758 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:48,758 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:46:48,759 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:46:48,759 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:48,760 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:48,760 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,39285,1732020313788' ***** 2024-11-19T12:46:48,760 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:48,760 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:46:48,760 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:46:48,761 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:46:48,761 INFO [RS:0;aba5a916dfea:39285 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:46:48,761 INFO [RS:0;aba5a916dfea:39285 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:46:48,761 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(3091): Received CLOSE for 0bd843196e5e072ea49d96974d03f9a9 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,39285,1732020313788 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:39285. 2024-11-19T12:46:48,762 DEBUG [RS:0;aba5a916dfea:39285 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:46:48,762 DEBUG [RS:0;aba5a916dfea:39285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 
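The first call stack above bottoms out in AbstractTestLogRolling.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(), which is what drives the whole shutdown sequence recorded in this log. A minimal sketch of such a JUnit teardown, assuming the usual static test-util field (the field and class names here are illustrative, not taken from this log):

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public abstract class LogRollingTearDownSketch {
        // Assumed to have been used earlier to start the mini cluster
        // (e.g. TEST_UTIL.startMiniCluster() in a setup method).
        protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

        @After
        public void tearDown() throws Exception {
            // Stops the single-process HBase cluster (master + region server),
            // then the mini DFS and ZooKeeper, producing the shutdown entries below.
            TEST_UTIL.shutdownMiniCluster();
        }
    }
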
2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:46:48,762 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:46:48,762 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0bd843196e5e072ea49d96974d03f9a9, disabling compactions & flushes 2024-11-19T12:46:48,762 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. after waiting 0 ms 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:48,763 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T12:46:48,763 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 0bd843196e5e072ea49d96974d03f9a9=TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.} 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:46:48,763 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0bd843196e5e072ea49d96974d03f9a9 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-19T12:46:48,763 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:46:48,763 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:46:48,763 DEBUG [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1351): Waiting on 0bd843196e5e072ea49d96974d03f9a9, 1588230740 2024-11-19T12:46:48,764 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-19T12:46:48,769 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the 
biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/0c6e21ac4e5d4aff86d912f282051892 is 1080, key is row0029/info:/1732020402678/Put/seqid=0 2024-11-19T12:46:48,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741850_1026 (size=8193) 2024-11-19T12:46:48,775 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741850_1026 (size=8193) 2024-11-19T12:46:48,776 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/0c6e21ac4e5d4aff86d912f282051892 2024-11-19T12:46:48,787 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/.tmp/info/0c6e21ac4e5d4aff86d912f282051892 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/0c6e21ac4e5d4aff86d912f282051892 2024-11-19T12:46:48,792 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/info/fc0249fb3e594f889c0e603ecef1a934 is 195, key is TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9./info:regioninfo/1732020316550/Put/seqid=0 2024-11-19T12:46:48,797 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/0c6e21ac4e5d4aff86d912f282051892, entries=3, sequenceid=48, filesize=8.0 K 2024-11-19T12:46:48,799 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 36ms, sequenceid=48, compaction requested=true 2024-11-19T12:46:48,802 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2, 
hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7] to archive 2024-11-19T12:46:48,803 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741851_1027 (size=7016) 2024-11-19T12:46:48,804 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/info/fc0249fb3e594f889c0e603ecef1a934 2024-11-19T12:46:48,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741851_1027 (size=7016) 2024-11-19T12:46:48,806 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:46:48,809 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/91f69dc4b49a48ada0ef4b0327ee0377 2024-11-19T12:46:48,813 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/d21a21200c2d48d691e83eae304f30d2 2024-11-19T12:46:48,816 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7 to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/archive/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/info/479e9ba6f617449a8d30c04b1f5977e7 2024-11-19T12:46:48,832 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/ns/613c484e792a4d678d50909527eef36a is 43, key is default/ns:d/1732020315825/Put/seqid=0 2024-11-19T12:46:48,830 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to 
Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=aba5a916dfea:39801 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-19T12:46:48,834 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [91f69dc4b49a48ada0ef4b0327ee0377=12509, d21a21200c2d48d691e83eae304f30d2=12509, 479e9ba6f617449a8d30c04b1f5977e7=12509] 2024-11-19T12:46:48,842 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/default/TestLogRolling-testSlowSyncLogRolling/0bd843196e5e072ea49d96974d03f9a9/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-19T12:46:48,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741852_1028 (size=5153) 2024-11-19T12:46:48,843 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741852_1028 (size=5153) 2024-11-19T12:46:48,844 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:48,845 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0bd843196e5e072ea49d96974d03f9a9: Waiting for close lock at 1732020408762Running coprocessor pre-close hooks at 1732020408762Disabling compacts and flushes for region at 1732020408762Disabling writes for close at 1732020408763 (+1 ms)Obtaining lock to block concurrent updates at 1732020408763Preparing flush snapshotting stores in 0bd843196e5e072ea49d96974d03f9a9 at 1732020408763Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732020408763Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 
at 1732020408764 (+1 ms)Flushing 0bd843196e5e072ea49d96974d03f9a9/info: creating writer at 1732020408764Flushing 0bd843196e5e072ea49d96974d03f9a9/info: appending metadata at 1732020408768 (+4 ms)Flushing 0bd843196e5e072ea49d96974d03f9a9/info: closing flushed file at 1732020408768Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@61c24086: reopening flushed file at 1732020408786 (+18 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 0bd843196e5e072ea49d96974d03f9a9 in 36ms, sequenceid=48, compaction requested=true at 1732020408799 (+13 ms)Writing region close event to WAL at 1732020408836 (+37 ms)Running coprocessor post-close hooks at 1732020408843 (+7 ms)Closed at 1732020408844 (+1 ms) 2024-11-19T12:46:48,845 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/ns/613c484e792a4d678d50909527eef36a 2024-11-19T12:46:48,845 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732020316108.0bd843196e5e072ea49d96974d03f9a9. 2024-11-19T12:46:48,871 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/table/e26e92aba56240b585c17b38baa3f880 is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732020316567/Put/seqid=0 2024-11-19T12:46:48,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741853_1029 (size=5396) 2024-11-19T12:46:48,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741853_1029 (size=5396) 2024-11-19T12:46:48,879 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/table/e26e92aba56240b585c17b38baa3f880 2024-11-19T12:46:48,888 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/info/fc0249fb3e594f889c0e603ecef1a934 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/info/fc0249fb3e594f889c0e603ecef1a934 2024-11-19T12:46:48,897 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/info/fc0249fb3e594f889c0e603ecef1a934, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T12:46:48,899 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/ns/613c484e792a4d678d50909527eef36a as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/ns/613c484e792a4d678d50909527eef36a 2024-11-19T12:46:48,907 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/ns/613c484e792a4d678d50909527eef36a, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:46:48,909 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/.tmp/table/e26e92aba56240b585c17b38baa3f880 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/table/e26e92aba56240b585c17b38baa3f880 2024-11-19T12:46:48,919 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/table/e26e92aba56240b585c17b38baa3f880, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T12:46:48,921 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false 2024-11-19T12:46:48,927 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:46:48,928 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:46:48,928 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:48,928 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020408763Running coprocessor pre-close hooks at 1732020408763Disabling compacts and flushes for region at 1732020408763Disabling writes for close at 1732020408763Obtaining lock to block concurrent updates at 1732020408764 (+1 ms)Preparing flush snapshotting stores in 1588230740 at 1732020408764Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732020408764Flushing stores of hbase:meta,,1.1588230740 at 1732020408766 (+2 ms)Flushing 1588230740/info: creating writer at 1732020408766Flushing 1588230740/info: appending metadata at 1732020408791 (+25 ms)Flushing 1588230740/info: closing flushed file at 1732020408791Flushing 1588230740/ns: creating writer at 1732020408815 (+24 ms)Flushing 1588230740/ns: appending metadata at 1732020408832 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732020408832Flushing 
1588230740/table: creating writer at 1732020408854 (+22 ms)Flushing 1588230740/table: appending metadata at 1732020408871 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732020408871Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3204e44d: reopening flushed file at 1732020408887 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b191012: reopening flushed file at 1732020408897 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2c9f3bbb: reopening flushed file at 1732020408907 (+10 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 157ms, sequenceid=11, compaction requested=false at 1732020408921 (+14 ms)Writing region close event to WAL at 1732020408922 (+1 ms)Running coprocessor post-close hooks at 1732020408928 (+6 ms)Closed at 1732020408928 2024-11-19T12:46:48,929 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:48,964 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,39285,1732020313788; all regions closed. 2024-11-19T12:46:48,966 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,966 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,966 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,967 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,967 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741834_1010 (size=3066) 2024-11-19T12:46:48,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741834_1010 (size=3066) 2024-11-19T12:46:48,976 DEBUG [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs 2024-11-19T12:46:48,976 INFO [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C39285%2C1732020313788.meta:.meta(num 1732020315625) 2024-11-19T12:46:48,977 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,977 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,977 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,977 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,977 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:48,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741847_1023 (size=12695) 2024-11-19T12:46:48,981 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741847_1023 (size=12695) 2024-11-19T12:46:48,985 DEBUG [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/oldWALs 2024-11-19T12:46:48,985 INFO [RS:0;aba5a916dfea:39285 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C39285%2C1732020313788:(num 1732020388632) 2024-11-19T12:46:48,986 DEBUG [RS:0;aba5a916dfea:39285 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:48,986 
INFO [RS:0;aba5a916dfea:39285 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:46:48,986 INFO [RS:0;aba5a916dfea:39285 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:46:48,986 INFO [RS:0;aba5a916dfea:39285 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T12:46:48,986 INFO [RS:0;aba5a916dfea:39285 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:46:48,986 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:46:48,987 INFO [RS:0;aba5a916dfea:39285 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39285 2024-11-19T12:46:49,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:46:49,015 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,39285,1732020313788 2024-11-19T12:46:49,015 INFO [RS:0;aba5a916dfea:39285 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:46:49,025 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,39285,1732020313788] 2024-11-19T12:46:49,033 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,39285,1732020313788 already deleted, retry=false 2024-11-19T12:46:49,033 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,39285,1732020313788 expired; onlineServers=0 2024-11-19T12:46:49,033 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,39801,1732020312906' ***** 2024-11-19T12:46:49,033 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:46:49,034 INFO [M:0;aba5a916dfea:39801 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:46:49,034 INFO [M:0;aba5a916dfea:39801 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:46:49,034 DEBUG [M:0;aba5a916dfea:39801 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:46:49,034 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
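Since this test exercises log rolling: the "Closed WAL: FSHLog ..." and "Moved ... WAL file(s) to .../oldWALs" entries above are the normal end-of-life of the region server's WALs during shutdown. For reference, a roll can also be requested explicitly through the public Admin API; a hedged sketch (the Admin handle and ServerName are assumed to come from a running cluster and are not taken from this log):

    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;

    public final class WalRollSketch {
        // Asks the given region server to roll its current WAL; the replaced
        // file is later archived to oldWALs, as in the entries above.
        public static void rollWal(Admin admin, ServerName server) throws Exception {
            admin.rollWALWriter(server);
        }
    }
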
2024-11-19T12:46:49,034 DEBUG [M:0;aba5a916dfea:39801 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:46:49,034 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020314867 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020314867,5,FailOnTimeoutGroup] 2024-11-19T12:46:49,034 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020314863 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020314863,5,FailOnTimeoutGroup] 2024-11-19T12:46:49,035 INFO [M:0;aba5a916dfea:39801 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:46:49,035 INFO [M:0;aba5a916dfea:39801 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:46:49,035 DEBUG [M:0;aba5a916dfea:39801 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:46:49,035 INFO [M:0;aba5a916dfea:39801 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:46:49,036 INFO [M:0;aba5a916dfea:39801 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:46:49,036 INFO [M:0;aba5a916dfea:39801 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:46:49,036 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:46:49,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:46:49,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:49,041 DEBUG [M:0;aba5a916dfea:39801 {}] zookeeper.ZKUtil(347): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:46:49,041 WARN [M:0;aba5a916dfea:39801 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:46:49,042 INFO [M:0;aba5a916dfea:39801 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/.lastflushedseqids 2024-11-19T12:46:49,044 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:46:49,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741854_1030 (size=130) 2024-11-19T12:46:49,067 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741854_1030 (size=130) 2024-11-19T12:46:49,067 INFO [M:0;aba5a916dfea:39801 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:46:49,067 INFO [M:0;aba5a916dfea:39801 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, 
isAbort=false 2024-11-19T12:46:49,067 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:46:49,068 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:49,068 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:49,068 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:46:49,068 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:49,068 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-19T12:46:49,086 DEBUG [M:0;aba5a916dfea:39801 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe9eead6a8754dfc86f041c6ee4b25ec is 82, key is hbase:meta,,1/info:regioninfo/1732020315697/Put/seqid=0 2024-11-19T12:46:49,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741855_1031 (size=5672) 2024-11-19T12:46:49,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741855_1031 (size=5672) 2024-11-19T12:46:49,097 INFO [M:0;aba5a916dfea:39801 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe9eead6a8754dfc86f041c6ee4b25ec 2024-11-19T12:46:49,124 DEBUG [M:0;aba5a916dfea:39801 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a49f2b33d524f729c0fda7633f1d3ea is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020316575/Put/seqid=0 2024-11-19T12:46:49,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:49,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39285-0x101546a2a8f0001, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:49,126 INFO [RS:0;aba5a916dfea:39285 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:46:49,126 INFO [RS:0;aba5a916dfea:39285 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,39285,1732020313788; zookeeper connection closed. 
2024-11-19T12:46:49,126 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@18e69c07 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@18e69c07 2024-11-19T12:46:49,127 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:46:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741856_1032 (size=6247) 2024-11-19T12:46:49,130 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741856_1032 (size=6247) 2024-11-19T12:46:49,131 INFO [M:0;aba5a916dfea:39801 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a49f2b33d524f729c0fda7633f1d3ea 2024-11-19T12:46:49,137 INFO [M:0;aba5a916dfea:39801 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8a49f2b33d524f729c0fda7633f1d3ea 2024-11-19T12:46:49,155 DEBUG [M:0;aba5a916dfea:39801 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ca11e4f0d7d456d829f4700f3ee7a7f is 69, key is aba5a916dfea,39285,1732020313788/rs:state/1732020314945/Put/seqid=0 2024-11-19T12:46:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741857_1033 (size=5156) 2024-11-19T12:46:49,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741857_1033 (size=5156) 2024-11-19T12:46:49,163 INFO [M:0;aba5a916dfea:39801 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ca11e4f0d7d456d829f4700f3ee7a7f 2024-11-19T12:46:49,194 DEBUG [M:0;aba5a916dfea:39801 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/943a85eeb00e4c67a793f203e521ba48 is 52, key is load_balancer_on/state:d/1732020316089/Put/seqid=0 2024-11-19T12:46:49,203 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741858_1034 (size=5056) 2024-11-19T12:46:49,204 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741858_1034 (size=5056) 2024-11-19T12:46:49,204 INFO [M:0;aba5a916dfea:39801 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/943a85eeb00e4c67a793f203e521ba48 2024-11-19T12:46:49,214 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/fe9eead6a8754dfc86f041c6ee4b25ec as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fe9eead6a8754dfc86f041c6ee4b25ec 2024-11-19T12:46:49,223 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/fe9eead6a8754dfc86f041c6ee4b25ec, entries=8, sequenceid=59, filesize=5.5 K 2024-11-19T12:46:49,225 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/8a49f2b33d524f729c0fda7633f1d3ea as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8a49f2b33d524f729c0fda7633f1d3ea 2024-11-19T12:46:49,233 INFO [M:0;aba5a916dfea:39801 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 8a49f2b33d524f729c0fda7633f1d3ea 2024-11-19T12:46:49,233 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/8a49f2b33d524f729c0fda7633f1d3ea, entries=6, sequenceid=59, filesize=6.1 K 2024-11-19T12:46:49,235 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/0ca11e4f0d7d456d829f4700f3ee7a7f as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ca11e4f0d7d456d829f4700f3ee7a7f 2024-11-19T12:46:49,244 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/0ca11e4f0d7d456d829f4700f3ee7a7f, entries=1, sequenceid=59, filesize=5.0 K 2024-11-19T12:46:49,246 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/943a85eeb00e4c67a793f203e521ba48 as hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/943a85eeb00e4c67a793f203e521ba48 2024-11-19T12:46:49,255 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/943a85eeb00e4c67a793f203e521ba48, entries=1, sequenceid=59, filesize=4.9 K 2024-11-19T12:46:49,257 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 188ms, 
sequenceid=59, compaction requested=false 2024-11-19T12:46:49,258 INFO [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:49,258 DEBUG [M:0;aba5a916dfea:39801 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020409067Disabling compacts and flushes for region at 1732020409067Disabling writes for close at 1732020409068 (+1 ms)Obtaining lock to block concurrent updates at 1732020409068Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020409068Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732020409069 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020409069Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020409069Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020409086 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020409086Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020409104 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020409123 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020409123Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020409137 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020409154 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020409154Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020409172 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020409193 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020409193Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@31614a32: reopening flushed file at 1732020409213 (+20 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@27d0f75a: reopening flushed file at 1732020409224 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@46d3ec08: reopening flushed file at 1732020409234 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4c39d4b0: reopening flushed file at 1732020409245 (+11 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 188ms, sequenceid=59, compaction requested=false at 1732020409257 (+12 ms)Writing region close event to WAL at 1732020409258 (+1 ms)Closed at 1732020409258 2024-11-19T12:46:49,259 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:49,260 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:49,260 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:49,260 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:49,260 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:49,268 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42099 is added to blk_1073741830_1006 (size=27973) 2024-11-19T12:46:49,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40091 is added to blk_1073741830_1006 (size=27973) 
2024-11-19T12:46:49,270 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:46:49,270 INFO [M:0;aba5a916dfea:39801 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:46:49,270 INFO [M:0;aba5a916dfea:39801 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39801 2024-11-19T12:46:49,270 INFO [M:0;aba5a916dfea:39801 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:46:49,390 INFO [M:0;aba5a916dfea:39801 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:46:49,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:49,390 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39801-0x101546a2a8f0000, quorum=127.0.0.1:60987, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:49,394 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55d18735{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:49,397 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@15370523{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:49,397 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:49,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@335a4f9a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:49,397 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ae73635{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:49,400 WARN [BP-509242514-172.17.0.2-1732020308604 heartbeating to localhost/127.0.0.1:44083 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:46:49,400 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:46:49,400 WARN [BP-509242514-172.17.0.2-1732020308604 heartbeating to localhost/127.0.0.1:44083 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-509242514-172.17.0.2-1732020308604 (Datanode Uuid ca0489c0-bffa-42bf-94d7-a005e65e8750) service to localhost/127.0.0.1:44083 2024-11-19T12:46:49,400 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:46:49,402 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data3/current/BP-509242514-172.17.0.2-1732020308604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:49,402 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data4/current/BP-509242514-172.17.0.2-1732020308604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:49,403 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:46:49,405 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@59e63bea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:49,405 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@264a9341{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:49,406 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:49,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5bdc1e47{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:49,406 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2aa5bb6e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:49,408 WARN [BP-509242514-172.17.0.2-1732020308604 heartbeating to localhost/127.0.0.1:44083 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:46:49,408 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:46:49,408 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:46:49,408 WARN [BP-509242514-172.17.0.2-1732020308604 heartbeating to localhost/127.0.0.1:44083 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-509242514-172.17.0.2-1732020308604 (Datanode Uuid aa502fa1-9332-4ba0-883a-b866ff71d3be) service to localhost/127.0.0.1:44083 2024-11-19T12:46:49,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data1/current/BP-509242514-172.17.0.2-1732020308604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:49,409 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/cluster_05105463-cc47-222b-a632-b2067f502bc6/data/data2/current/BP-509242514-172.17.0.2-1732020308604 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:49,409 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:46:49,419 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4c77270f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:46:49,420 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c5145e6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:49,420 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:49,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@46a86f8c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:49,420 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f2ab976{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:49,429 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:46:49,466 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:46:49,477 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:44083 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44083 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) 
app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44083 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/aba5a916dfea:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44083 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) 
app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:44083 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/aba5a916dfea:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/aba5a916dfea:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: ForkJoinPool-2-worker-3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44083 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) 
java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@1ab9ef81 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) - Thread LEAK? -, OpenFileDescriptor=404 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=157 (was 348), ProcessCount=12 (was 11) - ProcessCount LEAK? -, AvailableMemoryMB=7261 (was 7176) - AvailableMemoryMB LEAK? - 2024-11-19T12:46:49,485 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=404, MaxFileDescriptor=1048576, SystemLoadAverage=157, ProcessCount=11, AvailableMemoryMB=7262 2024-11-19T12:46:49,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:46:49,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.log.dir so I do NOT create it in target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90 2024-11-19T12:46:49,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/cfacc634-a124-fddc-a57b-83e4080fea2e/hadoop.tmp.dir so I do NOT create it in target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90 2024-11-19T12:46:49,486 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01, deleteOnExit=true 2024-11-19T12:46:49,486 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/test.cache.data in system properties and HBase conf 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:46:49,487 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T12:46:49,487 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:46:49,488 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:46:49,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:46:49,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:46:49,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:46:49,489 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:46:49,505 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:46:49,756 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:49,763 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:49,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:49,765 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:49,765 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:46:49,766 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:49,766 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@43fab4bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:49,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1ea6e47a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:49,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2281152e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/java.io.tmpdir/jetty-localhost-39599-hadoop-hdfs-3_4_1-tests_jar-_-any-5464276272664400737/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:46:49,878 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3485277{HTTP/1.1, (http/1.1)}{localhost:39599} 2024-11-19T12:46:49,878 INFO [Time-limited test {}] server.Server(415): Started @103576ms 2024-11-19T12:46:49,893 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:46:50,116 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:50,121 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:50,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:50,122 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:50,122 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:46:50,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:50,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:50,234 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7e335929{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/java.io.tmpdir/jetty-localhost-36153-hadoop-hdfs-3_4_1-tests_jar-_-any-4148554264381334369/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:50,234 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:36153} 2024-11-19T12:46:50,234 INFO [Time-limited test {}] server.Server(415): Started @103932ms 2024-11-19T12:46:50,236 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:46:50,288 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:50,293 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:50,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:50,294 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:50,294 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:46:50,294 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:50,295 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:50,404 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@11ff445e{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/java.io.tmpdir/jetty-localhost-42881-hadoop-hdfs-3_4_1-tests_jar-_-any-14133517589364858942/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:50,405 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:42881} 2024-11-19T12:46:50,405 INFO [Time-limited test {}] server.Server(415): Started @104103ms 2024-11-19T12:46:50,406 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:46:50,964 WARN [Thread-451 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data1/current/BP-1626899054-172.17.0.2-1732020409518/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:50,965 WARN [Thread-452 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data2/current/BP-1626899054-172.17.0.2-1732020409518/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:50,985 WARN [Thread-415 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:46:50,988 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66826f666619179c with lease ID 0x49d8a1b92d99702: Processing first storage report for DS-1e609aa5-23ec-462d-a758-fff0124cff3f from datanode DatanodeRegistration(127.0.0.1:34293, datanodeUuid=b254b1f2-cc98-47d0-a26f-483cab6860a8, infoPort=45901, infoSecurePort=0, ipcPort=35739, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518) 2024-11-19T12:46:50,988 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66826f666619179c with lease ID 0x49d8a1b92d99702: from storage DS-1e609aa5-23ec-462d-a758-fff0124cff3f node DatanodeRegistration(127.0.0.1:34293, datanodeUuid=b254b1f2-cc98-47d0-a26f-483cab6860a8, infoPort=45901, infoSecurePort=0, ipcPort=35739, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:50,988 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x66826f666619179c with lease ID 0x49d8a1b92d99702: Processing first storage report for DS-f80f8899-10b5-4c96-9097-fadb8d2797fb from datanode DatanodeRegistration(127.0.0.1:34293, datanodeUuid=b254b1f2-cc98-47d0-a26f-483cab6860a8, infoPort=45901, infoSecurePort=0, ipcPort=35739, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518) 2024-11-19T12:46:50,988 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x66826f666619179c with lease ID 0x49d8a1b92d99702: from storage DS-f80f8899-10b5-4c96-9097-fadb8d2797fb node DatanodeRegistration(127.0.0.1:34293, datanodeUuid=b254b1f2-cc98-47d0-a26f-483cab6860a8, infoPort=45901, infoSecurePort=0, ipcPort=35739, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:51,112 WARN [Thread-462 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data3/current/BP-1626899054-172.17.0.2-1732020409518/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:51,112 WARN [Thread-463 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data4/current/BP-1626899054-172.17.0.2-1732020409518/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:51,133 WARN [Thread-438 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:46:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71859207259f224f with lease ID 0x49d8a1b92d99703: Processing first storage report for DS-bcfe8637-2809-443a-8676-17884baa4e69 from datanode DatanodeRegistration(127.0.0.1:41259, datanodeUuid=b8341f9d-60fa-4208-8019-a80e6e33d00b, infoPort=39291, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518) 2024-11-19T12:46:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71859207259f224f with lease ID 0x49d8a1b92d99703: from storage DS-bcfe8637-2809-443a-8676-17884baa4e69 node DatanodeRegistration(127.0.0.1:41259, datanodeUuid=b8341f9d-60fa-4208-8019-a80e6e33d00b, infoPort=39291, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x71859207259f224f with lease ID 0x49d8a1b92d99703: Processing first storage report for DS-586e7fac-488c-43fa-8bdf-25f6e2ac0e76 from datanode DatanodeRegistration(127.0.0.1:41259, datanodeUuid=b8341f9d-60fa-4208-8019-a80e6e33d00b, infoPort=39291, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518) 2024-11-19T12:46:51,136 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x71859207259f224f with lease ID 0x49d8a1b92d99703: from storage DS-586e7fac-488c-43fa-8bdf-25f6e2ac0e76 node DatanodeRegistration(127.0.0.1:41259, datanodeUuid=b8341f9d-60fa-4208-8019-a80e6e33d00b, infoPort=39291, infoSecurePort=0, ipcPort=34953, storageInfo=lv=-57;cid=testClusterID;nsid=695618690;c=1732020409518), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:46:51,143 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90 2024-11-19T12:46:51,146 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/zookeeper_0, clientPort=50359, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:46:51,147 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=50359 2024-11-19T12:46:51,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,149 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:46:51,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:46:51,162 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714 with version=8 2024-11-19T12:46:51,162 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:46:51,166 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:46:51,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,166 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,166 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:46:51,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,167 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:46:51,167 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:46:51,167 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:46:51,168 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34725 2024-11-19T12:46:51,172 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34725 connecting to ZooKeeper ensemble=127.0.0.1:50359 2024-11-19T12:46:51,211 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:347250x0, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:46:51,212 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34725-0x101546badce0000 connected 2024-11-19T12:46:51,274 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,277 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,280 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:51,280 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714, hbase.cluster.distributed=false 2024-11-19T12:46:51,282 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:46:51,283 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34725 2024-11-19T12:46:51,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34725 2024-11-19T12:46:51,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34725 2024-11-19T12:46:51,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34725 2024-11-19T12:46:51,284 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34725 2024-11-19T12:46:51,300 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:46:51,301 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:46:51,302 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33681 2024-11-19T12:46:51,304 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:33681 connecting to ZooKeeper ensemble=127.0.0.1:50359 2024-11-19T12:46:51,304 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,307 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:336810x0, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:46:51,324 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:51,324 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:33681-0x101546badce0001 connected 2024-11-19T12:46:51,324 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:46:51,325 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:46:51,326 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:46:51,327 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:46:51,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33681 2024-11-19T12:46:51,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33681 2024-11-19T12:46:51,331 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33681 2024-11-19T12:46:51,332 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33681 2024-11-19T12:46:51,332 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33681 2024-11-19T12:46:51,345 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:34725 2024-11-19T12:46:51,345 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:51,357 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:51,357 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, 
baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:46:51,365 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,366 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,366 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:46:51,367 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,34725,1732020411165 from backup master directory 2024-11-19T12:46:51,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:51,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,374 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:51,374 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:46:51,374 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,379 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/hbase.id] with ID: 0b30a533-b786-4b68-a803-4a69d6a9a98f 2024-11-19T12:46:51,379 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/.tmp/hbase.id 2024-11-19T12:46:51,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:46:51,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:46:51,388 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/.tmp/hbase.id]:[hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/hbase.id] 2024-11-19T12:46:51,404 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:51,405 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:46:51,407 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-19T12:46:51,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,415 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:46:51,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:46:51,425 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:46:51,426 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:46:51,427 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:51,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:46:51,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:46:51,439 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store 2024-11-19T12:46:51,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:46:51,450 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:46:51,451 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:51,451 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:46:51,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020411451Disabling compacts and flushes for region at 1732020411451Disabling writes for close at 1732020411451Writing region close event to WAL at 1732020411451Closed at 1732020411451 2024-11-19T12:46:51,453 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/.initializing 2024-11-19T12:46:51,453 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/WALs/aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,456 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C34725%2C1732020411165, suffix=, logDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/WALs/aba5a916dfea,34725,1732020411165, archiveDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/oldWALs, maxLogs=10 2024-11-19T12:46:51,457 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C34725%2C1732020411165.1732020411457 2024-11-19T12:46:51,467 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/WALs/aba5a916dfea,34725,1732020411165/aba5a916dfea%2C34725%2C1732020411165.1732020411457 2024-11-19T12:46:51,468 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45901:45901),(127.0.0.1/127.0.0.1:39291:39291)] 2024-11-19T12:46:51,469 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:46:51,469 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:51,469 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,469 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,471 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,473 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:46:51,473 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:51,474 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,476 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:46:51,476 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:51,477 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,480 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:46:51,480 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:51,481 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,483 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:46:51,483 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,484 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:51,484 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,485 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,486 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,487 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,488 DEBUG [master/aba5a916dfea:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,488 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:46:51,490 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:51,492 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:46:51,493 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708071, jitterRate=-0.09964124858379364}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:46:51,494 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020411469Initializing all the Stores at 1732020411471 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020411471Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020411471Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020411471Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020411471Cleaning up temporary data from old regions at 1732020411488 (+17 ms)Region opened successfully at 1732020411494 (+6 ms) 2024-11-19T12:46:51,494 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:46:51,499 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@23e28865, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:46:51,500 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:46:51,500 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:46:51,500 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:46:51,500 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:46:51,501 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:46:51,502 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:46:51,502 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:46:51,505 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:46:51,506 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:46:51,523 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:46:51,524 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:46:51,525 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:46:51,532 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:46:51,533 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:46:51,534 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:46:51,540 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:46:51,542 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:46:51,548 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:46:51,551 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:46:51,557 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:46:51,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:51,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:51,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,565 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,566 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,34725,1732020411165, sessionid=0x101546badce0000, setting cluster-up flag (Was=false) 2024-11-19T12:46:51,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,607 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:46:51,608 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,624 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:51,649 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:46:51,650 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,34725,1732020411165 2024-11-19T12:46:51,652 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:46:51,654 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:51,655 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:46:51,655 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:46:51,655 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,34725,1732020411165 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:46:51,657 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:51,657 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:51,657 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:51,657 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:51,657 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:46:51,658 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,658 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:46:51,658 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020441659 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:46:51,659 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,660 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:46:51,660 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:46:51,660 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:46:51,660 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:51,660 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:46:51,660 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:46:51,660 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:46:51,661 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020411661,5,FailOnTimeoutGroup] 2024-11-19T12:46:51,661 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020411661,5,FailOnTimeoutGroup] 2024-11-19T12:46:51,661 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,661 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:46:51,662 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,662 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,662 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,663 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:46:51,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:46:51,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:46:51,673 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:46:51,673 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714 2024-11-19T12:46:51,681 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:46:51,682 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:46:51,683 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:51,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:46:51,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:46:51,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:51,687 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:46:51,689 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:46:51,689 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:51,693 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:46:51,695 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:46:51,695 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:51,696 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:46:51,698 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:46:51,698 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:51,698 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:51,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:46:51,699 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740 2024-11-19T12:46:51,700 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740 2024-11-19T12:46:51,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:46:51,702 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:46:51,702 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
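Editor's note: the hbase:meta descriptor logged above (ROWCOL bloom filter, ROW_INDEX_V1 encoding, in-memory families, 8 KB block size, 3 versions) uses the same attributes exposed through the public TableDescriptorBuilder / ColumnFamilyDescriptorBuilder API. A minimal Java sketch for a hypothetical user table "t1" with one 'info' family mirroring those attributes; the table name and the create call are illustrative, not part of this test run:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class CreateTableSketch {
  public static void main(String[] args) throws Exception {
    // Column family mirroring the attributes logged for hbase:meta's 'info' family.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8192)                                   // BLOCKSIZE => 8 KB
        .build();
    TableDescriptor table = TableDescriptorBuilder.newBuilder(TableName.valueOf("t1"))
        .setColumnFamily(info)
        .build();
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin()) {
      admin.createTable(table);
    }
  }
}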
2024-11-19T12:46:51,704 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:46:51,707 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:46:51,707 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=825749, jitterRate=0.0499950647354126}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:46:51,708 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020411683Initializing all the Stores at 1732020411684 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020411684Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020411684Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020411684Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020411684Cleaning up temporary data from old regions at 1732020411702 (+18 ms)Region opened successfully at 1732020411708 (+6 ms) 2024-11-19T12:46:51,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:46:51,709 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:46:51,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:46:51,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:46:51,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:46:51,709 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:51,709 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020411709Disabling compacts and flushes for region at 1732020411709Disabling writes for close at 1732020411709Writing region close 
event to WAL at 1732020411709Closed at 1732020411709 2024-11-19T12:46:51,711 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:51,711 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:46:51,712 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:46:51,714 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:46:51,715 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:46:51,734 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(746): ClusterId : 0b30a533-b786-4b68-a803-4a69d6a9a98f 2024-11-19T12:46:51,735 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:46:51,767 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:46:51,767 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:46:51,775 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:46:51,775 DEBUG [RS:0;aba5a916dfea:33681 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6be0c5c3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:46:51,788 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:33681 2024-11-19T12:46:51,789 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:46:51,789 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:46:51,789 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T12:46:51,790 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,34725,1732020411165 with port=33681, startcode=1732020411300 2024-11-19T12:46:51,790 DEBUG [RS:0;aba5a916dfea:33681 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:46:51,792 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50371, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:46:51,793 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34725 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,793 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34725 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,796 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714 2024-11-19T12:46:51,796 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:34279 2024-11-19T12:46:51,796 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:46:51,807 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:46:51,807 DEBUG [RS:0;aba5a916dfea:33681 {}] zookeeper.ZKUtil(111): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,807 WARN [RS:0;aba5a916dfea:33681 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:46:51,808 INFO [RS:0;aba5a916dfea:33681 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:51,808 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/WALs/aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,808 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,33681,1732020411300] 2024-11-19T12:46:51,812 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:46:51,819 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:46:51,819 INFO [RS:0;aba5a916dfea:33681 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:46:51,819 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
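Editor's note: the region server start-up above selects FSHLogProvider as the WAL provider, caps the global memstore at 880 M, and bounds compaction throughput between 50 and 100 MB/s. A rough sketch of how a test or site configuration might set the corresponding knobs; the property names are best-effort assumptions and should be checked against the HBase version in use:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class RegionServerTuningSketch {
  public static Configuration tuned() {
    Configuration conf = HBaseConfiguration.create();
    // "filesystem" selects FSHLogProvider, as instantiated by WALFactory above.
    conf.set("hbase.wal.provider", "filesystem");
    // Global memstore limit as a fraction of heap (the log reports an 880 M absolute limit).
    conf.setDouble("hbase.regionserver.global.memstore.size", 0.4);
    // Compaction throughput bounds in bytes/sec (log: lower bound 50 MB/s, higher bound 100 MB/s).
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    return conf;
  }
}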
2024-11-19T12:46:51,819 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:46:51,820 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:46:51,821 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,821 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:51,822 DEBUG [RS:0;aba5a916dfea:33681 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:51,822 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:51,822 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,822 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,823 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,823 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,823 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,33681,1732020411300-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:46:51,838 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:46:51,838 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,33681,1732020411300-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,838 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,838 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.Replication(171): aba5a916dfea,33681,1732020411300 started 2024-11-19T12:46:51,854 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:51,854 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,33681,1732020411300, RpcServer on aba5a916dfea/172.17.0.2:33681, sessionid=0x101546badce0001 2024-11-19T12:46:51,855 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:46:51,855 DEBUG [RS:0;aba5a916dfea:33681 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,855 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,33681,1732020411300' 2024-11-19T12:46:51,855 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,33681,1732020411300 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,33681,1732020411300' 2024-11-19T12:46:51,856 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:46:51,857 DEBUG 
[RS:0;aba5a916dfea:33681 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:46:51,857 DEBUG [RS:0;aba5a916dfea:33681 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:46:51,857 INFO [RS:0;aba5a916dfea:33681 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:46:51,857 INFO [RS:0;aba5a916dfea:33681 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:46:51,865 WARN [aba5a916dfea:34725 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:46:51,960 INFO [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C33681%2C1732020411300, suffix=, logDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/WALs/aba5a916dfea,33681,1732020411300, archiveDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/oldWALs, maxLogs=32 2024-11-19T12:46:51,963 INFO [RS:0;aba5a916dfea:33681 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C33681%2C1732020411300.1732020411963 2024-11-19T12:46:51,972 INFO [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/WALs/aba5a916dfea,33681,1732020411300/aba5a916dfea%2C33681%2C1732020411300.1732020411963 2024-11-19T12:46:51,976 DEBUG [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45901:45901),(127.0.0.1/127.0.0.1:39291:39291)] 2024-11-19T12:46:52,116 DEBUG [aba5a916dfea:34725 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:46:52,116 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,33681,1732020411300 2024-11-19T12:46:52,119 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,33681,1732020411300, state=OPENING 2024-11-19T12:46:52,165 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:46:52,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:52,174 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:52,175 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:46:52,175 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:52,175 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, 
server=aba5a916dfea,33681,1732020411300}] 2024-11-19T12:46:52,175 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:52,330 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:46:52,332 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58115, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:46:52,337 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:46:52,337 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:52,340 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C33681%2C1732020411300.meta, suffix=.meta, logDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/WALs/aba5a916dfea,33681,1732020411300, archiveDir=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/oldWALs, maxLogs=32 2024-11-19T12:46:52,343 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C33681%2C1732020411300.meta.1732020412343.meta 2024-11-19T12:46:52,357 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/WALs/aba5a916dfea,33681,1732020411300/aba5a916dfea%2C33681%2C1732020411300.meta.1732020412343.meta 2024-11-19T12:46:52,359 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39291:39291),(127.0.0.1/127.0.0.1:45901:45901)] 2024-11-19T12:46:52,360 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:46:52,361 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
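Editor's note: the WAL configuration entries above (blocksize=256 MB, rollsize=128 MB, maxLogs=32) map onto a handful of region server properties. A small sketch, with property names assumed from the usual FSHLog settings rather than taken from this log:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class WalRollSketch {
  public static Configuration walConf() {
    Configuration conf = HBaseConfiguration.create();
    // Block size of the underlying WAL files (log: blocksize=256 MB).
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
    // Roll the WAL at multiplier * blocksize (0.5 * 256 MB = 128 MB, matching rollsize above).
    conf.setDouble("hbase.regionserver.logroll.multiplier", 0.5);
    // Maximum number of WAL files kept before flushes are forced (log: maxLogs=32).
    conf.setInt("hbase.regionserver.maxlogs", 32);
    return conf;
  }
}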
2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:46:52,361 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:46:52,363 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:46:52,364 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:46:52,364 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:52,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:52,365 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:46:52,366 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:46:52,366 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:52,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:52,367 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:46:52,368 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:46:52,369 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:52,369 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:52,370 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:46:52,371 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:46:52,371 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:52,372 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
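Editor's note: the CompactionConfiguration lines above repeat the same per-family settings for every store (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, 7-day major period with 0.5 jitter). A sketch of the configuration keys that commonly drive those values; the key names are assumptions to verify against the running version:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static Configuration compactionConf() {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                // minFilesToCompact:3
    conf.setInt("hbase.hstore.compaction.max", 10);               // maxFilesToCompact:10
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);         // ratio 1.200000
    conf.setLong("hbase.hregion.majorcompaction", 604800000L);    // major period: 7 days in ms
    conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);  // major jitter 0.500000
    return conf;
  }
}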
2024-11-19T12:46:52,372 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:46:52,373 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740 2024-11-19T12:46:52,375 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740 2024-11-19T12:46:52,376 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:46:52,376 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:46:52,377 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:46:52,379 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:46:52,380 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=705090, jitterRate=-0.10343281924724579}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:46:52,380 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:46:52,381 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020412361Writing region info on filesystem at 1732020412361Initializing all the Stores at 1732020412363 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020412363Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020412363Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020412363Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020412363Cleaning up temporary data from old regions at 1732020412376 (+13 ms)Running coprocessor post-open hooks at 1732020412380 (+4 ms)Region opened successfully at 1732020412381 (+1 ms) 2024-11-19T12:46:52,382 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020412330 2024-11-19T12:46:52,386 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:46:52,386 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:46:52,387 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,33681,1732020411300 2024-11-19T12:46:52,388 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,33681,1732020411300, state=OPEN 2024-11-19T12:46:52,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:46:52,429 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:46:52,429 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,33681,1732020411300 2024-11-19T12:46:52,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:52,429 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:52,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:46:52,433 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,33681,1732020411300 in 254 msec 2024-11-19T12:46:52,437 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:46:52,438 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 721 msec 2024-11-19T12:46:52,439 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:52,439 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:46:52,441 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:46:52,441 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,33681,1732020411300, seqNum=-1] 2024-11-19T12:46:52,441 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:46:52,443 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43811, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:46:52,450 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 796 msec 2024-11-19T12:46:52,451 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020412451, completionTime=-1 2024-11-19T12:46:52,451 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:46:52,451 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:46:52,453 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:46:52,453 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020472453 2024-11-19T12:46:52,453 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020532453 2024-11-19T12:46:52,453 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:34725, period=300000, unit=MILLISECONDS is enabled. 
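Editor's note: once InitMetaProcedure has created the 'default' and 'hbase' namespaces and the hbase:meta location has been published, a client can observe both through the ordinary Connection API. A minimal sketch; connection setup and printing are illustrative:

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.NamespaceDescriptor;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionLocator;

public class ClusterIntrospectionSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = conn.getAdmin();
         RegionLocator meta = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
      // The 'default' and 'hbase' namespaces created by InitMetaProcedure above.
      for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
        System.out.println("namespace: " + ns.getName());
      }
      // Where hbase:meta is currently assigned (the location fetched from the registry above).
      System.out.println("meta at: " + meta.getRegionLocation(HConstants.EMPTY_START_ROW));
    }
  }
}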
2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:52,454 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:52,456 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:46:52,460 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.086sec 2024-11-19T12:46:52,460 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:46:52,460 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:46:52,460 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:46:52,461 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:46:52,461 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:46:52,461 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:46:52,461 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:46:52,463 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:46:52,464 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:46:52,464 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34725,1732020411165-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:52,535 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4b53d9a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:52,535 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,34725,-1 for getting cluster id 2024-11-19T12:46:52,535 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:46:52,539 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '0b30a533-b786-4b68-a803-4a69d6a9a98f' 2024-11-19T12:46:52,540 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:46:52,540 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "0b30a533-b786-4b68-a803-4a69d6a9a98f" 2024-11-19T12:46:52,541 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3d956d2c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:52,541 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,34725,-1] 2024-11-19T12:46:52,542 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:46:52,543 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:52,545 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47490, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:46:52,546 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@39bdb4ef, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:52,547 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:46:52,548 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,33681,1732020411300, seqNum=-1] 2024-11-19T12:46:52,548 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:46:52,550 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60050, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:46:52,552 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,34725,1732020411165 2024-11-19T12:46:52,553 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:52,557 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:46:52,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:46:52,557 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T12:46:52,557 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:46:52,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:52,557 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:52,557 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:46:52,558 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:46:52,558 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1891077224, stopped=false 2024-11-19T12:46:52,558 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,34725,1732020411165 2024-11-19T12:46:52,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:52,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:52,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:52,582 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:46:52,582 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:52,582 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
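The call stacks above show where this shutdown originates: AbstractTestLogRolling.tearDown calls HBaseTestingUtil.shutdownMiniCluster, which first closes the cached async connection and then asks the master to shut the cluster down. A minimal sketch of that teardown pattern follows; only the HBaseTestingUtil method named in the stack trace is taken from the log, while the TEST_UTIL field name and the @After wiring are assumptions:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class LogRollingTeardownSketch {
      // Shared test harness instance; the field name is an assumption.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Mirrors the path in the stack trace above: shutdownMiniCluster()
        // closes the connection, stops the region server(s) and master,
        // then tears down the MiniZK and MiniDFS clusters.
        TEST_UTIL.shutdownMiniCluster();
      }
    }
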
2024-11-19T12:46:52,582 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:46:52,582 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:52,582 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:52,583 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:52,583 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,33681,1732020411300' ***** 2024-11-19T12:46:52,583 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:46:52,583 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,33681,1732020411300 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:46:52,583 INFO [RS:0;aba5a916dfea:33681 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:33681. 2024-11-19T12:46:52,584 DEBUG [RS:0;aba5a916dfea:33681 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:46:52,584 DEBUG [RS:0;aba5a916dfea:33681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:52,584 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
2024-11-19T12:46:52,584 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:46:52,584 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:46:52,584 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:46:52,584 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T12:46:52,584 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:46:52,584 DEBUG [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T12:46:52,584 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:46:52,585 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:46:52,585 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:46:52,585 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:46:52,585 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:46:52,585 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T12:46:52,605 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/.tmp/ns/f08ae4c07a274dd1be617dd75296fb4d is 43, key is default/ns:d/1732020412444/Put/seqid=0 2024-11-19T12:46:52,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741835_1011 (size=5153) 2024-11-19T12:46:52,611 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741835_1011 (size=5153) 2024-11-19T12:46:52,612 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/.tmp/ns/f08ae4c07a274dd1be617dd75296fb4d 2024-11-19T12:46:52,621 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/.tmp/ns/f08ae4c07a274dd1be617dd75296fb4d as hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/ns/f08ae4c07a274dd1be617dd75296fb4d 2024-11-19T12:46:52,630 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/ns/f08ae4c07a274dd1be617dd75296fb4d, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T12:46:52,631 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false 2024-11-19T12:46:52,632 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:46:52,639 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T12:46:52,640 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:46:52,640 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:52,640 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020412584Running coprocessor pre-close hooks at 1732020412584Disabling compacts and flushes for region at 1732020412584Disabling writes for close at 1732020412585 (+1 ms)Obtaining lock to block concurrent updates at 1732020412585Preparing flush snapshotting stores in 1588230740 at 1732020412585Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732020412585Flushing stores of hbase:meta,,1.1588230740 at 1732020412586 (+1 ms)Flushing 1588230740/ns: creating writer at 1732020412587 (+1 ms)Flushing 1588230740/ns: appending metadata at 1732020412604 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732020412604Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@59bdfd69: reopening flushed file at 1732020412619 (+15 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 46ms, sequenceid=6, compaction requested=false at 1732020412632 (+13 ms)Writing region close event to WAL at 1732020412634 (+2 ms)Running coprocessor post-close hooks at 1732020412640 (+6 ms)Closed at 1732020412640 2024-11-19T12:46:52,640 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:52,785 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,33681,1732020411300; all regions closed. 
2024-11-19T12:46:52,786 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,786 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,786 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,786 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,787 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,790 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741834_1010 (size=1152) 2024-11-19T12:46:52,791 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741834_1010 (size=1152) 2024-11-19T12:46:52,795 DEBUG [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/oldWALs 2024-11-19T12:46:52,795 INFO [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C33681%2C1732020411300.meta:.meta(num 1732020412343) 2024-11-19T12:46:52,795 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,795 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,795 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,796 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,796 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:52,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741833_1009 (size=93) 2024-11-19T12:46:52,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741833_1009 (size=93) 2024-11-19T12:46:52,900 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:46:52,900 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:46:53,201 DEBUG [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/oldWALs 2024-11-19T12:46:53,201 INFO [RS:0;aba5a916dfea:33681 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C33681%2C1732020411300:(num 1732020411963) 2024-11-19T12:46:53,201 DEBUG [RS:0;aba5a916dfea:33681 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:53,201 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:46:53,202 INFO [RS:0;aba5a916dfea:33681 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:46:53,202 INFO [RS:0;aba5a916dfea:33681 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:46:53,202 INFO [RS:0;aba5a916dfea:33681 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:46:53,202 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:46:53,202 INFO [RS:0;aba5a916dfea:33681 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33681 2024-11-19T12:46:53,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:46:53,215 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,33681,1732020411300 2024-11-19T12:46:53,215 INFO [RS:0;aba5a916dfea:33681 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:46:53,223 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,33681,1732020411300] 2024-11-19T12:46:53,231 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,33681,1732020411300 already deleted, retry=false 2024-11-19T12:46:53,232 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,33681,1732020411300 expired; onlineServers=0 2024-11-19T12:46:53,232 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,34725,1732020411165' ***** 2024-11-19T12:46:53,232 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:46:53,232 INFO [M:0;aba5a916dfea:34725 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:46:53,232 INFO [M:0;aba5a916dfea:34725 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:46:53,232 DEBUG [M:0;aba5a916dfea:34725 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:46:53,232 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:46:53,232 DEBUG [M:0;aba5a916dfea:34725 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:46:53,232 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020411661 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020411661,5,FailOnTimeoutGroup] 2024-11-19T12:46:53,232 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020411661 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020411661,5,FailOnTimeoutGroup] 2024-11-19T12:46:53,232 INFO [M:0;aba5a916dfea:34725 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:46:53,233 INFO [M:0;aba5a916dfea:34725 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:46:53,233 DEBUG [M:0;aba5a916dfea:34725 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:46:53,233 INFO [M:0;aba5a916dfea:34725 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:46:53,233 INFO [M:0;aba5a916dfea:34725 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:46:53,233 INFO [M:0;aba5a916dfea:34725 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:46:53,233 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:46:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:46:53,240 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:53,240 DEBUG [M:0;aba5a916dfea:34725 {}] zookeeper.ZKUtil(347): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:46:53,240 WARN [M:0;aba5a916dfea:34725 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:46:53,241 INFO [M:0;aba5a916dfea:34725 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/.lastflushedseqids 2024-11-19T12:46:53,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741836_1012 (size=99) 2024-11-19T12:46:53,248 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741836_1012 (size=99) 2024-11-19T12:46:53,249 INFO [M:0;aba5a916dfea:34725 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:46:53,249 INFO [M:0;aba5a916dfea:34725 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:46:53,249 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:46:53,249 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:53,249 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:53,249 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:46:53,249 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:53,249 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T12:46:53,269 DEBUG [M:0;aba5a916dfea:34725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3765beef5f1b490ea064a1809c122e6a is 82, key is hbase:meta,,1/info:regioninfo/1732020412387/Put/seqid=0 2024-11-19T12:46:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741837_1013 (size=5672) 2024-11-19T12:46:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741837_1013 (size=5672) 2024-11-19T12:46:53,275 INFO [M:0;aba5a916dfea:34725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3765beef5f1b490ea064a1809c122e6a 2024-11-19T12:46:53,300 DEBUG [M:0;aba5a916dfea:34725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0da78ffaa5d4c07b0df276e017f3c31 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732020412450/Put/seqid=0 2024-11-19T12:46:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741838_1014 (size=5275) 2024-11-19T12:46:53,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741838_1014 (size=5275) 2024-11-19T12:46:53,307 INFO [M:0;aba5a916dfea:34725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0da78ffaa5d4c07b0df276e017f3c31 2024-11-19T12:46:53,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:53,323 INFO [RS:0;aba5a916dfea:33681 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:46:53,324 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:33681-0x101546badce0001, 
quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:53,324 INFO [RS:0;aba5a916dfea:33681 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,33681,1732020411300; zookeeper connection closed. 2024-11-19T12:46:53,327 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@74d8c576 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@74d8c576 2024-11-19T12:46:53,327 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:46:53,335 DEBUG [M:0;aba5a916dfea:34725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1995133a6d84f4bbbe136eb895ea56d is 69, key is aba5a916dfea,33681,1732020411300/rs:state/1732020411794/Put/seqid=0 2024-11-19T12:46:53,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741839_1015 (size=5156) 2024-11-19T12:46:53,341 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741839_1015 (size=5156) 2024-11-19T12:46:53,341 INFO [M:0;aba5a916dfea:34725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1995133a6d84f4bbbe136eb895ea56d 2024-11-19T12:46:53,363 DEBUG [M:0;aba5a916dfea:34725 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e373c6144f24326a6a17511608f13f6 is 52, key is load_balancer_on/state:d/1732020412555/Put/seqid=0 2024-11-19T12:46:53,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741840_1016 (size=5056) 2024-11-19T12:46:53,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741840_1016 (size=5056) 2024-11-19T12:46:53,369 INFO [M:0;aba5a916dfea:34725 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e373c6144f24326a6a17511608f13f6 2024-11-19T12:46:53,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:46:53,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:46:53,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-19T12:46:53,378 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/3765beef5f1b490ea064a1809c122e6a as hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3765beef5f1b490ea064a1809c122e6a 2024-11-19T12:46:53,386 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/3765beef5f1b490ea064a1809c122e6a, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T12:46:53,387 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/b0da78ffaa5d4c07b0df276e017f3c31 as hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0da78ffaa5d4c07b0df276e017f3c31 2024-11-19T12:46:53,394 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/b0da78ffaa5d4c07b0df276e017f3c31, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T12:46:53,396 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d1995133a6d84f4bbbe136eb895ea56d as hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1995133a6d84f4bbbe136eb895ea56d 2024-11-19T12:46:53,402 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d1995133a6d84f4bbbe136eb895ea56d, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T12:46:53,403 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/7e373c6144f24326a6a17511608f13f6 as hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e373c6144f24326a6a17511608f13f6 2024-11-19T12:46:53,411 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:34279/user/jenkins/test-data/a290c2e4-d1a7-6eab-07d6-dccb9d58c714/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/7e373c6144f24326a6a17511608f13f6, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T12:46:53,412 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=29, compaction requested=false 2024-11-19T12:46:53,415 INFO [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:46:53,415 DEBUG [M:0;aba5a916dfea:34725 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020413249Disabling compacts and flushes for region at 1732020413249Disabling writes for close at 1732020413249Obtaining lock to block concurrent updates at 1732020413249Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020413249Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732020413250 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020413251 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020413251Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020413268 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020413269 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020413282 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020413300 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020413300Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020413318 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020413334 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020413334Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020413347 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020413362 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020413362Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@486972c3: reopening flushed file at 1732020413377 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44a3cf54: reopening flushed file at 1732020413386 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17d3c3b9: reopening flushed file at 1732020413394 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3255e476: reopening flushed file at 1732020413402 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 163ms, sequenceid=29, compaction requested=false at 1732020413412 (+10 ms)Writing region close event to WAL at 1732020413415 (+3 ms)Closed at 1732020413415 2024-11-19T12:46:53,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:53,416 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:53,416 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:53,416 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:53,416 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:46:53,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41259 is added to blk_1073741830_1006 (size=10311) 2024-11-19T12:46:53,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34293 is added to blk_1073741830_1006 (size=10311) 2024-11-19T12:46:53,422 INFO [M:0;aba5a916dfea:34725 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T12:46:53,422 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:46:53,422 INFO [M:0;aba5a916dfea:34725 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34725 2024-11-19T12:46:53,422 INFO [M:0;aba5a916dfea:34725 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:46:53,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:53,548 INFO [M:0;aba5a916dfea:34725 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:46:53,548 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34725-0x101546badce0000, quorum=127.0.0.1:50359, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:46:53,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@11ff445e{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:53,551 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5e63263c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:53,551 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:53,551 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4fcb1c4b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:53,552 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3168153a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:53,553 WARN [BP-1626899054-172.17.0.2-1732020409518 heartbeating to localhost/127.0.0.1:34279 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:46:53,553 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:46:53,553 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:46:53,553 WARN [BP-1626899054-172.17.0.2-1732020409518 heartbeating to localhost/127.0.0.1:34279 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1626899054-172.17.0.2-1732020409518 (Datanode Uuid b8341f9d-60fa-4208-8019-a80e6e33d00b) service to localhost/127.0.0.1:34279 2024-11-19T12:46:53,553 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data3/current/BP-1626899054-172.17.0.2-1732020409518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:53,554 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data4/current/BP-1626899054-172.17.0.2-1732020409518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:53,554 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:46:53,556 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7e335929{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:53,557 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@726508a1{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:53,557 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:53,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@33e82987{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:53,557 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@345536c6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:53,558 WARN [BP-1626899054-172.17.0.2-1732020409518 heartbeating to localhost/127.0.0.1:34279 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:46:53,559 WARN [BP-1626899054-172.17.0.2-1732020409518 heartbeating to localhost/127.0.0.1:34279 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1626899054-172.17.0.2-1732020409518 (Datanode Uuid b254b1f2-cc98-47d0-a26f-483cab6860a8) service to localhost/127.0.0.1:34279 2024-11-19T12:46:53,559 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:46:53,559 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:46:53,559 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data1/current/BP-1626899054-172.17.0.2-1732020409518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:53,559 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/cluster_7489f232-b547-a0a0-81d0-6f07cb695d01/data/data2/current/BP-1626899054-172.17.0.2-1732020409518 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:46:53,560 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:46:53,565 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2281152e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:46:53,566 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3485277{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:46:53,566 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:46:53,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1ea6e47a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:46:53,566 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@43fab4bf{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir/,STOPPED} 2024-11-19T12:46:53,573 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.log.dir so I do NOT create it in target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/5374b082-1f08-6fc5-0ca6-c8be46fabb90/hadoop.tmp.dir so I do NOT create it in target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce 
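The restart that begins here uses the options printed in the "Starting up minicluster" entry above (1 master, 1 region server, 2 datanodes, 1 ZK server). A rough sketch of how a test typically requests such a cluster is below; the option values come straight from the logged StartMiniClusterOption, but the builder shape and its pairing with HBaseTestingUtil are assumptions to check against the HBase version under test:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class MiniClusterStartupSketch {
      // Shared test harness instance; the field name is an assumption.
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      public void startCluster() throws Exception {
        // numMasters/numRegionServers/numDataNodes/numZkServers match the
        // values recorded in the log; builder usage is an assumed shape.
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .numZkServers(1)
            .build();
        TEST_UTIL.startMiniCluster(option);
      }
    }
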
2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258, deleteOnExit=true 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:46:53,597 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/test.cache.data in system properties and HBase conf 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:46:53,598 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-19T12:46:53,598 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:46:53,599 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:46:53,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:46:53,600 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:46:53,614 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:46:53,823 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:46:53,846 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,851 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,874 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-19T12:46:53,877 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,894 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,895 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,896 WARN [Time-limited test {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:46:53,925 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:53,932 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:53,934 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:53,935 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:53,935 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:46:53,939 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:53,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@75b4bf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:53,940 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1cbabe3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:54,048 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@a95d0{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-36389-hadoop-hdfs-3_4_1-tests_jar-_-any-8246164033340021243/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:46:54,048 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1222fb27{HTTP/1.1, (http/1.1)}{localhost:36389} 2024-11-19T12:46:54,048 INFO [Time-limited test {}] server.Server(415): Started @107746ms 2024-11-19T12:46:54,062 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:46:54,299 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:54,302 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:54,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:54,303 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:54,303 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:46:54,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1998e8d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:54,305 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@45408de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:54,438 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@20e45d77{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-41307-hadoop-hdfs-3_4_1-tests_jar-_-any-3479350956064893667/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:54,438 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@371f43b7{HTTP/1.1, (http/1.1)}{localhost:41307} 2024-11-19T12:46:54,439 INFO [Time-limited test {}] server.Server(415): Started @108136ms 2024-11-19T12:46:54,441 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:46:54,498 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:46:54,502 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:46:54,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:46:54,507 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:46:54,507 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:46:54,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@34bdf3a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:46:54,508 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@781a23c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:46:54,615 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@16656ed0{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-36003-hadoop-hdfs-3_4_1-tests_jar-_-any-16888400728237478827/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:46:54,616 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7dac475c{HTTP/1.1, (http/1.1)}{localhost:36003} 2024-11-19T12:46:54,616 INFO [Time-limited test {}] server.Server(415): Started @108313ms 2024-11-19T12:46:54,618 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:46:55,165 WARN [Thread-670 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data1/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:55,165 WARN [Thread-671 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data2/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:55,185 WARN [Thread-635 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:46:55,187 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5238a7ff7af685a with lease ID 0x5e64b62b73df066f: Processing first storage report for DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad from datanode DatanodeRegistration(127.0.0.1:40473, datanodeUuid=35cdafb0-7b4e-40f9-bc8e-146aa436c13c, infoPort=42163, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:46:55,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5238a7ff7af685a with lease ID 0x5e64b62b73df066f: from storage DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad node DatanodeRegistration(127.0.0.1:40473, datanodeUuid=35cdafb0-7b4e-40f9-bc8e-146aa436c13c, infoPort=42163, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:55,188 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd5238a7ff7af685a with lease ID 0x5e64b62b73df066f: Processing first storage report for DS-b59784ef-fcc8-4355-91a4-49702402830f from datanode DatanodeRegistration(127.0.0.1:40473, datanodeUuid=35cdafb0-7b4e-40f9-bc8e-146aa436c13c, infoPort=42163, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:46:55,188 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd5238a7ff7af685a with lease ID 0x5e64b62b73df066f: from storage DS-b59784ef-fcc8-4355-91a4-49702402830f node DatanodeRegistration(127.0.0.1:40473, datanodeUuid=35cdafb0-7b4e-40f9-bc8e-146aa436c13c, infoPort=42163, infoSecurePort=0, ipcPort=43633, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:55,388 WARN [Thread-682 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:55,388 WARN [Thread-683 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:46:55,407 WARN [Thread-658 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:46:55,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7874505cf270d3da with lease ID 0x5e64b62b73df0670: Processing first storage report for DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef from datanode DatanodeRegistration(127.0.0.1:36863, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=37619, infoSecurePort=0, ipcPort=41627, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:46:55,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7874505cf270d3da with lease ID 0x5e64b62b73df0670: from storage DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef node DatanodeRegistration(127.0.0.1:36863, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=37619, infoSecurePort=0, ipcPort=41627, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:55,409 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7874505cf270d3da with lease ID 0x5e64b62b73df0670: Processing first storage report for DS-31d3546d-867b-4637-be0c-f8ec157cc6a5 from datanode DatanodeRegistration(127.0.0.1:36863, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=37619, infoSecurePort=0, ipcPort=41627, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:46:55,409 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7874505cf270d3da with lease ID 0x5e64b62b73df0670: from storage DS-31d3546d-867b-4637-be0c-f8ec157cc6a5 node DatanodeRegistration(127.0.0.1:36863, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=37619, infoSecurePort=0, ipcPort=41627, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:46:55,457 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce 2024-11-19T12:46:55,461 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/zookeeper_0, clientPort=49346, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:46:55,463 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49346 2024-11-19T12:46:55,463 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,465 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:46:55,481 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:46:55,482 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0 with version=8 2024-11-19T12:46:55,482 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:46:55,485 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:46:55,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,485 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,486 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:46:55,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,486 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:46:55,486 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:46:55,486 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:46:55,487 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:32943 2024-11-19T12:46:55,488 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:32943 connecting to ZooKeeper ensemble=127.0.0.1:49346 2024-11-19T12:46:55,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:329430x0, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:46:55,536 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:32943-0x101546bbea90000 connected 2024-11-19T12:46:55,598 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,600 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,603 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:55,603 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0, hbase.cluster.distributed=false 2024-11-19T12:46:55,605 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:46:55,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=32943 2024-11-19T12:46:55,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=32943 2024-11-19T12:46:55,605 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=32943 2024-11-19T12:46:55,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=32943 2024-11-19T12:46:55,606 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=32943 2024-11-19T12:46:55,622 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:46:55,622 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:46:55,623 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:46:55,623 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43765 2024-11-19T12:46:55,625 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43765 connecting to ZooKeeper ensemble=127.0.0.1:49346 2024-11-19T12:46:55,626 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,627 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,639 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:437650x0, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:46:55,640 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:437650x0, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:46:55,640 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43765-0x101546bbea90001 connected 2024-11-19T12:46:55,640 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:46:55,641 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:46:55,641 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:46:55,643 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:46:55,643 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T12:46:55,644 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43765 2024-11-19T12:46:55,646 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43765 2024-11-19T12:46:55,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T12:46:55,649 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43765 2024-11-19T12:46:55,664 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:32943 2024-11-19T12:46:55,664 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,32943,1732020415485 2024-11-19T12:46:55,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:55,673 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:55,674 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/aba5a916dfea,32943,1732020415485 2024-11-19T12:46:55,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:55,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:46:55,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:55,682 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:46:55,682 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,32943,1732020415485 from backup master directory 2024-11-19T12:46:55,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,32943,1732020415485 2024-11-19T12:46:55,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:55,689 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:46:55,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:46:55,689 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,32943,1732020415485 2024-11-19T12:46:55,695 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/hbase.id] with ID: 1d0c25a0-e324-4b3e-9770-933ccdfa835b 2024-11-19T12:46:55,695 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/.tmp/hbase.id 2024-11-19T12:46:55,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:46:55,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:46:55,703 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/.tmp/hbase.id]:[hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/hbase.id] 2024-11-19T12:46:55,717 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:55,717 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:46:55,719 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
2024-11-19T12:46:55,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:55,731 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:55,738 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:46:55,739 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:46:55,739 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:46:55,740 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:46:55,740 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:55,751 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:46:55,752 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:46:55,753 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store 2024-11-19T12:46:55,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:46:55,765 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:46:56,166 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:56,167 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:46:56,167 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:56,167 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:56,167 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:46:56,167 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:46:56,167 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:46:56,167 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020416166Disabling compacts and flushes for region at 1732020416166Disabling writes for close at 1732020416167 (+1 ms)Writing region close event to WAL at 1732020416167Closed at 1732020416167 2024-11-19T12:46:56,168 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/.initializing 2024-11-19T12:46:56,168 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485 2024-11-19T12:46:56,173 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C32943%2C1732020415485, suffix=, logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485, archiveDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/oldWALs, maxLogs=10 2024-11-19T12:46:56,174 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C32943%2C1732020415485.1732020416173 2024-11-19T12:46:56,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 2024-11-19T12:46:56,185 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42163:42163),(127.0.0.1/127.0.0.1:37619:37619)] 2024-11-19T12:46:56,186 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:46:56,186 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:56,187 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,187 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,189 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,190 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:46:56,190 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:56,191 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:46:56,193 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:56,193 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,195 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:46:56,195 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:56,196 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,197 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:46:56,197 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,198 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:56,198 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,199 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,200 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,201 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,201 DEBUG [master/aba5a916dfea:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,202 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:46:56,203 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:46:56,208 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:46:56,209 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=858242, jitterRate=0.09131139516830444}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:46:56,211 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020416187Initializing all the Stores at 1732020416188 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020416188Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020416189 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020416189Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020416189Cleaning up temporary data from old regions at 1732020416201 (+12 ms)Region opened successfully at 1732020416211 (+10 ms) 2024-11-19T12:46:56,211 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:46:56,215 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5f707888, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:46:56,216 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:46:56,216 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:46:56,216 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:46:56,217 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:46:56,217 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:46:56,218 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:46:56,218 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:46:56,221 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:46:56,222 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:46:56,264 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:46:56,265 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:46:56,266 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:46:56,272 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:46:56,273 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:46:56,274 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:46:56,281 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:46:56,282 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:46:56,289 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:46:56,291 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:46:56,297 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:46:56,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:56,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:46:56,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,306 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,307 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,32943,1732020415485, sessionid=0x101546bbea90000, setting cluster-up flag (Was=false) 2024-11-19T12:46:56,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,323 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,348 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:46:56,349 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,32943,1732020415485 2024-11-19T12:46:56,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,364 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,389 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:46:56,391 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,32943,1732020415485 2024-11-19T12:46:56,393 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:46:56,396 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:56,396 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:46:56,396 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:46:56,397 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,32943,1732020415485 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:46:56,399 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T12:46:56,400 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020446400 2024-11-19T12:46:56,400 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:46:56,401 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,402 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:46:56,402 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:46:56,402 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:46:56,402 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:56,402 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:46:56,402 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:46:56,402 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:46:56,404 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,404 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', 
INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:46:56,407 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020416402,5,FailOnTimeoutGroup] 2024-11-19T12:46:56,407 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020416407,5,FailOnTimeoutGroup] 2024-11-19T12:46:56,407 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,407 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:46:56,407 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,407 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
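The hbase:meta table descriptor logged just above (ROWCOL bloom filters, in-memory column families, ROW_INDEX_V1 block encoding, 8 KB blocks) has the same shape a user table descriptor would have when built with the public HBase 2.x client API. The sketch below is illustrative only: the table name "demo" and the single "info" family are assumptions, not anything created by this test run.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        // Column family mirroring the 'info' family attributes seen in the log entries above.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                     // VERSIONS => '3'
            .setInMemory(true)                                     // IN_MEMORY => 'true'
            .setBlocksize(8192)                                    // BLOCKSIZE => '8192 B (8KB)'
            .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING => 'ROW_INDEX_V1'
            .build();
        TableDescriptor table = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))                 // hypothetical table name
            .setColumnFamily(info)
            .build();
        // Prints a descriptor string in the same format FSTableDescriptors logs above.
        System.out.println(table);
      }
    }

For ordinary (non-meta) tables, a descriptor like this would be passed to Admin.createTable(TableDescriptor); the meta descriptor here is written directly by InitMetaProcedure instead.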
2024-11-19T12:46:56,417 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:46:56,418 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:46:56,419 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:46:56,419 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0 2024-11-19T12:46:56,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:46:56,426 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:46:56,427 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:56,428 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:46:56,430 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:46:56,430 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,430 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:56,431 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:46:56,432 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:46:56,433 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:56,433 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:46:56,435 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:46:56,435 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:56,436 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:46:56,437 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:46:56,437 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:56,438 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:56,438 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:46:56,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740 2024-11-19T12:46:56,439 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740 2024-11-19T12:46:56,440 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:46:56,441 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:46:56,441 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
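The FlushLargeStoresPolicy entries above fall back to region.getMemStoreFlushHeapSize divided by the number of families because hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the table descriptor. Below is a minimal sketch of setting that bound explicitly on a hypothetical user table; the table and family names and the chosen sizes are assumptions for illustration.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class FlushPolicySketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))                  // hypothetical table
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("a"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("b"))
            .setMemStoreFlushSize(128L * 1024 * 1024)               // overall flush trigger: 128 MB
            // Per-family lower bound used by FlushLargeStoresPolicy; when absent,
            // the policy derives flushSize / #families, as in the log entries above.
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(32L * 1024 * 1024))
            .build();
        System.out.println(td);
      }
    }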
2024-11-19T12:46:56,443 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:46:56,445 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:46:56,446 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=691353, jitterRate=-0.12090025842189789}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:46:56,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020416427Initializing all the Stores at 1732020416428 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020416428Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020416428Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020416428Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020416428Cleaning up temporary data from old regions at 1732020416441 (+13 ms)Region opened successfully at 1732020416447 (+6 ms) 2024-11-19T12:46:56,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:46:56,447 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:46:56,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:46:56,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:46:56,447 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:46:56,448 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:46:56,448 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020416447Disabling compacts and flushes for region at 1732020416447Disabling writes for close at 1732020416447Writing region 
close event to WAL at 1732020416447Closed at 1732020416447 2024-11-19T12:46:56,449 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:56,449 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:46:56,449 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:46:56,474 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(746): ClusterId : 1d0c25a0-e324-4b3e-9770-933ccdfa835b 2024-11-19T12:46:56,475 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:46:56,475 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:46:56,476 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:46:56,490 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:46:56,490 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:46:56,499 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:46:56,499 DEBUG [RS:0;aba5a916dfea:43765 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d5ba6f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:46:56,516 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:43765 2024-11-19T12:46:56,516 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:46:56,516 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:46:56,516 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(832): About to register with Master. 
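Everything in this run hangs off the ZooKeeper quorum 127.0.0.1:49346 and the single master aba5a916dfea,32943,1732020415485. A client of such a mini cluster would locate the master and hbase:meta through the same quorum; the sketch below is an assumed standalone client, not part of the test, and only reuses the quorum host and client port values taken from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ClientSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum host from the log
        conf.set("hbase.zookeeper.property.clientPort", "49346");  // client port from the log
        // The connection reads the /hbase/meta-region-server znode (updated by the master
        // later in this log) to find hbase:meta and, through it, every other region.
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          for (TableName tn : admin.listTableNames()) {
            System.out.println(tn);
          }
        }
      }
    }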
2024-11-19T12:46:56,517 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,32943,1732020415485 with port=43765, startcode=1732020415622 2024-11-19T12:46:56,517 DEBUG [RS:0;aba5a916dfea:43765 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:46:56,519 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33133, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:46:56,520 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32943 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,520 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32943 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,522 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0 2024-11-19T12:46:56,522 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42615 2024-11-19T12:46:56,522 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:46:56,531 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:46:56,532 DEBUG [RS:0;aba5a916dfea:43765 {}] zookeeper.ZKUtil(111): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,532 WARN [RS:0;aba5a916dfea:43765 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:46:56,532 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,43765,1732020415622] 2024-11-19T12:46:56,532 INFO [RS:0;aba5a916dfea:43765 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:56,532 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,537 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:46:56,540 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:46:56,540 INFO [RS:0;aba5a916dfea:43765 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:46:56,540 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
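The MemStoreFlusher line above (globalMemStoreLimit=880 M, low mark 836 M, i.e. 95% of the limit) is derived from the JVM heap and two site-level fractions. A hedged sketch of the corresponding configuration follows; the fraction values shown are the usual defaults and are assumptions about this test's settings, since the log only prints the resulting sizes.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores combined
        // (880 M here would be this fraction of the test JVM's heap).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Low-water mark as a fraction of the limit above; 0.95 * 880 M is roughly 836 M,
        // matching globalMemStoreLimitLowMark in the log.
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        // Per-region flush trigger, which also feeds the FlushLargeStoresPolicy math above.
        conf.setLong("hbase.hregion.memstore.flush.size", 128L * 1024 * 1024);
        System.out.println(conf.get("hbase.regionserver.global.memstore.size"));
      }
    }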
2024-11-19T12:46:56,541 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:46:56,542 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:46:56,542 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,542 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,543 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,543 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,543 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:56,543 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:56,543 DEBUG [RS:0;aba5a916dfea:43765 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:56,543 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
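The repeated "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." entries come from ChoreService scheduling periodic tasks. ScheduledChore and ChoreService are internal HBase classes, so the sketch below only mirrors that mechanism for illustration; the chore name and period are made up, and the exact constructors should be checked against the HBase version in use.

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreSketch {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };
        // A chore that fires every second, like the CompactionChecker above (period=1000).
        ScheduledChore tick = new ScheduledChore("demo-chore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };
        ChoreService service = new ChoreService("demo");
        service.scheduleChore(tick);   // ChoreService logs the "... is enabled." line at this point
        Thread.sleep(3_000);
        service.shutdown();
      }
    }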
2024-11-19T12:46:56,543 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,543 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,543 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,543 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,544 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,43765,1732020415622-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:46:56,557 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:46:56,558 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,43765,1732020415622-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,558 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,558 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.Replication(171): aba5a916dfea,43765,1732020415622 started 2024-11-19T12:46:56,573 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:56,573 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,43765,1732020415622, RpcServer on aba5a916dfea/172.17.0.2:43765, sessionid=0x101546bbea90001 2024-11-19T12:46:56,573 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:46:56,574 DEBUG [RS:0;aba5a916dfea:43765 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,574 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,43765,1732020415622' 2024-11-19T12:46:56,574 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:46:56,574 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:46:56,575 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:46:56,575 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:46:56,575 DEBUG [RS:0;aba5a916dfea:43765 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,575 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,43765,1732020415622' 2024-11-19T12:46:56,575 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:46:56,576 DEBUG 
[RS:0;aba5a916dfea:43765 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:46:56,576 DEBUG [RS:0;aba5a916dfea:43765 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:46:56,576 INFO [RS:0;aba5a916dfea:43765 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:46:56,576 INFO [RS:0;aba5a916dfea:43765 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:46:56,626 WARN [aba5a916dfea:32943 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:46:56,679 INFO [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C43765%2C1732020415622, suffix=, logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622, archiveDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs, maxLogs=32 2024-11-19T12:46:56,680 INFO [RS:0;aba5a916dfea:43765 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020416679 2024-11-19T12:46:56,687 INFO [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 2024-11-19T12:46:56,695 DEBUG [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42163:42163),(127.0.0.1/127.0.0.1:37619:37619)] 2024-11-19T12:46:56,877 DEBUG [aba5a916dfea:32943 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:46:56,878 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:56,880 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,43765,1732020415622, state=OPENING 2024-11-19T12:46:56,897 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:46:56,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,906 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:46:56,906 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:46:56,907 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,43765,1732020415622}] 2024-11-19T12:46:56,907 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for 
path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:56,907 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:57,060 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:46:57,063 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:40489, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:46:57,068 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:46:57,068 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:57,070 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C43765%2C1732020415622.meta, suffix=.meta, logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622, archiveDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs, maxLogs=32 2024-11-19T12:46:57,071 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta 2024-11-19T12:46:57,077 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta 2024-11-19T12:46:57,083 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42163:42163),(127.0.0.1/127.0.0.1:37619:37619)] 2024-11-19T12:46:57,087 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:46:57,087 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:46:57,087 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:46:57,087 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
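Both WAL providers above are created with blocksize=256 MB, rollsize=128 MB and maxLogs=32. These values are driven by region server WAL settings; the sketch below uses what appear to be the relevant keys, but the key names are stated from general HBase documentation rather than from this log, so treat them as assumptions to verify against the target release.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class WalConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // WAL block size on HDFS; the log reports blocksize=256 MB.
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        // Roll the WAL at blocksize * multiplier; 256 MB * 0.5 = 128 MB, the rollsize in the log.
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // Maximum number of WAL files before flushes are forced; maxLogs=32 above.
        conf.setInt("hbase.regionserver.maxlogs", 32);
        System.out.println(conf.get("hbase.regionserver.hlog.blocksize"));
      }
    }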
2024-11-19T12:46:57,087 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:46:57,087 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:57,088 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:46:57,088 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:46:57,089 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:46:57,090 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:46:57,090 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,091 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:57,091 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:46:57,092 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:46:57,092 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:57,093 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:46:57,094 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:46:57,094 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:46:57,095 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:46:57,096 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:46:57,096 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,096 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
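Every store opened in this log prints the same CompactionConfiguration line (minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2, off-peak ratio 5.0, major period 604800000 ms with 0.5 jitter). The sketch below maps those figures onto the standard compaction keys; the key spellings come from general HBase documentation, not from this log, so verify them before relying on the exact names.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionConfigSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);                        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);                       // maxFilesToCompact
        conf.setLong("hbase.hstore.compaction.min.size", 128L * 1024 * 1024); // minCompactSize: 128 MB
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);                 // ratio 1.200000
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);         // off-peak ratio 5.000000
        conf.setLong("hbase.hregion.majorcompaction", 604_800_000L);          // major period: 7 days
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);          // major jitter 0.500000
        System.out.println(conf.get("hbase.hstore.compaction.ratio"));
      }
    }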
2024-11-19T12:46:57,096 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:46:57,097 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740 2024-11-19T12:46:57,099 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740 2024-11-19T12:46:57,100 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:46:57,100 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:46:57,101 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:46:57,103 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:46:57,104 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800937, jitterRate=0.018444523215293884}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:46:57,104 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:46:57,105 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020417088Writing region info on filesystem at 1732020417088Initializing all the Stores at 1732020417089 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020417089Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020417089Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020417089Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020417089Cleaning up temporary data from old regions at 1732020417100 (+11 ms)Running coprocessor post-open hooks at 1732020417104 (+4 ms)Region opened successfully at 1732020417105 (+1 ms) 2024-11-19T12:46:57,106 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020417060 2024-11-19T12:46:57,110 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:46:57,110 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:46:57,111 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:57,112 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,43765,1732020415622, state=OPEN 2024-11-19T12:46:57,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:46:57,154 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:46:57,154 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:57,154 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:57,154 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:46:57,160 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:46:57,161 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,43765,1732020415622 in 247 msec 2024-11-19T12:46:57,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:46:57,165 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 711 msec 2024-11-19T12:46:57,166 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:46:57,166 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:46:57,169 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:46:57,169 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,43765,1732020415622, seqNum=-1] 2024-11-19T12:46:57,169 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:46:57,171 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37297, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:46:57,178 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 781 msec 2024-11-19T12:46:57,178 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020417178, completionTime=-1 2024-11-19T12:46:57,178 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:46:57,178 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:46:57,180 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:46:57,180 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020477180 2024-11-19T12:46:57,180 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020537180 2024-11-19T12:46:57,180 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:46:57,180 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:32943, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:57,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,181 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,183 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.496sec 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:46:57,185 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:46:57,188 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:46:57,188 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:46:57,188 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,32943,1732020415485-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:57,278 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@54df7dcd, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:57,278 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,32943,-1 for getting cluster id 2024-11-19T12:46:57,278 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:46:57,281 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '1d0c25a0-e324-4b3e-9770-933ccdfa835b' 2024-11-19T12:46:57,282 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:46:57,282 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "1d0c25a0-e324-4b3e-9770-933ccdfa835b" 2024-11-19T12:46:57,282 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@781cdd7a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:57,282 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,32943,-1] 2024-11-19T12:46:57,283 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:46:57,283 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:46:57,285 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52712, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:46:57,285 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@48a35c8a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:46:57,286 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:46:57,287 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,43765,1732020415622, seqNum=-1] 2024-11-19T12:46:57,287 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:46:57,289 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:36294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:46:57,291 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,32943,1732020415485 2024-11-19T12:46:57,292 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:57,294 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:46:57,310 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:46:57,311 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:46:57,312 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42905 2024-11-19T12:46:57,314 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:42905 connecting to ZooKeeper ensemble=127.0.0.1:49346 2024-11-19T12:46:57,314 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:57,316 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:46:57,339 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:429050x0, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:46:57,340 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:429050x0, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-19T12:46:57,340 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-19T12:46:57,340 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:42905-0x101546bbea90002 connected 2024-11-19T12:46:57,341 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:46:57,342 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:46:57,343 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:46:57,345 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:46:57,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42905 2024-11-19T12:46:57,347 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42905 2024-11-19T12:46:57,348 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42905 2024-11-19T12:46:57,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42905 2024-11-19T12:46:57,352 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42905 2024-11-19T12:46:57,353 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(746): ClusterId : 1d0c25a0-e324-4b3e-9770-933ccdfa835b 2024-11-19T12:46:57,354 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:46:57,365 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:46:57,365 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:46:57,373 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:46:57,374 DEBUG [RS:1;aba5a916dfea:42905 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@280686c7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:46:57,385 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;aba5a916dfea:42905 2024-11-19T12:46:57,385 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:46:57,385 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:46:57,385 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-19T12:46:57,386 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,32943,1732020415485 with port=42905, startcode=1732020417310 2024-11-19T12:46:57,386 DEBUG [RS:1;aba5a916dfea:42905 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:46:57,388 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57921, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:46:57,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32943 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,388 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32943 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,390 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0 2024-11-19T12:46:57,390 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42615 2024-11-19T12:46:57,390 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:46:57,397 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:46:57,398 DEBUG [RS:1;aba5a916dfea:42905 {}] zookeeper.ZKUtil(111): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,398 WARN [RS:1;aba5a916dfea:42905 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:46:57,398 INFO [RS:1;aba5a916dfea:42905 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:46:57,398 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,42905,1732020417310] 2024-11-19T12:46:57,398 DEBUG [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,402 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:46:57,404 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:46:57,405 INFO [RS:1;aba5a916dfea:42905 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:46:57,405 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:57,405 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:46:57,406 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:46:57,406 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,406 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,406 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,406 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,406 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,406 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:57,407 DEBUG [RS:1;aba5a916dfea:42905 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:46:57,407 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T12:46:57,407 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,407 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,408 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,408 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,408 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42905,1732020417310-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:46:57,423 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:46:57,424 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42905,1732020417310-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,424 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,424 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.Replication(171): aba5a916dfea,42905,1732020417310 started 2024-11-19T12:46:57,438 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:46:57,439 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,42905,1732020417310, RpcServer on aba5a916dfea/172.17.0.2:42905, sessionid=0x101546bbea90002 2024-11-19T12:46:57,439 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:46:57,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;aba5a916dfea:42905,5,FailOnTimeoutGroup] 2024-11-19T12:46:57,439 DEBUG [RS:1;aba5a916dfea:42905 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,439 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,42905,1732020417310' 2024-11-19T12:46:57,439 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:46:57,439 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-19T12:46:57,439 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:46:57,440 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 
aba5a916dfea,42905,1732020417310 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,42905,1732020417310' 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:46:57,440 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:46:57,441 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,32943,1732020415485 2024-11-19T12:46:57,441 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37809924 2024-11-19T12:46:57,441 DEBUG [RS:1;aba5a916dfea:42905 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:46:57,441 INFO [RS:1;aba5a916dfea:42905 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:46:57,441 INFO [RS:1;aba5a916dfea:42905 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:46:57,441 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:46:57,443 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:52720, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:46:57,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T12:46:57,443 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T12:46:57,444 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:46:57,445 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T12:46:57,446 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:46:57,446 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,447 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-19T12:46:57,448 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:46:57,448 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:46:57,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741835_1011 (size=393) 2024-11-19T12:46:57,457 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741835_1011 (size=393) 2024-11-19T12:46:57,459 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 8de1681dda30e951b1c1e9986747cc57, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0 2024-11-19T12:46:57,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36863 is added to blk_1073741836_1012 (size=76) 2024-11-19T12:46:57,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40473 is added to blk_1073741836_1012 (size=76) 2024-11-19T12:46:57,466 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:57,467 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 8de1681dda30e951b1c1e9986747cc57, disabling compactions & flushes 2024-11-19T12:46:57,467 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,467 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,467 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. after waiting 0 ms 2024-11-19T12:46:57,467 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,467 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,467 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 8de1681dda30e951b1c1e9986747cc57: Waiting for close lock at 1732020417467Disabling compacts and flushes for region at 1732020417467Disabling writes for close at 1732020417467Writing region close event to WAL at 1732020417467Closed at 1732020417467 2024-11-19T12:46:57,469 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:46:57,469 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732020417469"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020417469"}]},"ts":"1732020417469"} 2024-11-19T12:46:57,472 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T12:46:57,473 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:46:57,474 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020417473"}]},"ts":"1732020417473"} 2024-11-19T12:46:57,476 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-19T12:46:57,477 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57, ASSIGN}] 2024-11-19T12:46:57,478 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57, ASSIGN 2024-11-19T12:46:57,480 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57, ASSIGN; state=OFFLINE, location=aba5a916dfea,43765,1732020415622; forceNewPlan=false, retain=false 2024-11-19T12:46:57,544 INFO [RS:1;aba5a916dfea:42905 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C42905%2C1732020417310, suffix=, logDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310, archiveDir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs, maxLogs=32 2024-11-19T12:46:57,545 INFO [RS:1;aba5a916dfea:42905 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C42905%2C1732020417310.1732020417545 2024-11-19T12:46:57,559 INFO [RS:1;aba5a916dfea:42905 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 2024-11-19T12:46:57,560 DEBUG [RS:1;aba5a916dfea:42905 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37619:37619),(127.0.0.1/127.0.0.1:42163:42163)] 2024-11-19T12:46:57,631 INFO [aba5a916dfea:32943 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 
2024-11-19T12:46:57,632 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8de1681dda30e951b1c1e9986747cc57, regionState=OPENING, regionLocation=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:57,635 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57, ASSIGN because future has completed 2024-11-19T12:46:57,636 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8de1681dda30e951b1c1e9986747cc57, server=aba5a916dfea,43765,1732020415622}] 2024-11-19T12:46:57,798 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,798 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 8de1681dda30e951b1c1e9986747cc57, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:46:57,799 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,799 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:46:57,799 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,799 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,802 INFO [StoreOpener-8de1681dda30e951b1c1e9986747cc57-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,804 INFO [StoreOpener-8de1681dda30e951b1c1e9986747cc57-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory 
org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 8de1681dda30e951b1c1e9986747cc57 columnFamilyName info 2024-11-19T12:46:57,804 DEBUG [StoreOpener-8de1681dda30e951b1c1e9986747cc57-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:46:57,804 INFO [StoreOpener-8de1681dda30e951b1c1e9986747cc57-1 {}] regionserver.HStore(327): Store=8de1681dda30e951b1c1e9986747cc57/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:46:57,805 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,805 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,806 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,806 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,806 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,808 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,810 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:46:57,811 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 8de1681dda30e951b1c1e9986747cc57; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879157, jitterRate=0.11790657043457031}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:46:57,811 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:46:57,812 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 8de1681dda30e951b1c1e9986747cc57: Running coprocessor pre-open hook at 1732020417800Writing region info on filesystem at 1732020417800Initializing all the Stores at 1732020417801 (+1 ms)Instantiating 
store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020417801Cleaning up temporary data from old regions at 1732020417806 (+5 ms)Running coprocessor post-open hooks at 1732020417811 (+5 ms)Region opened successfully at 1732020417812 (+1 ms) 2024-11-19T12:46:57,813 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57., pid=6, masterSystemTime=1732020417791 2024-11-19T12:46:57,816 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,816 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:46:57,817 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=8de1681dda30e951b1c1e9986747cc57, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,43765,1732020415622 2024-11-19T12:46:57,821 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 8de1681dda30e951b1c1e9986747cc57, server=aba5a916dfea,43765,1732020415622 because future has completed 2024-11-19T12:46:57,822 WARN [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=32943 {}] assignment.AssignmentManager(1543): Unable to acquire lock for regionNode state=OPEN, location=aba5a916dfea,43765,1732020415622, table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57. It is likely that another thread is currently holding the lock. To avoid deadlock, skip execution for now. 
2024-11-19T12:46:57,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:46:57,826 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 8de1681dda30e951b1c1e9986747cc57, server=aba5a916dfea,43765,1732020415622 in 187 msec 2024-11-19T12:46:57,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:46:57,829 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=8de1681dda30e951b1c1e9986747cc57, ASSIGN in 350 msec 2024-11-19T12:46:57,831 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:46:57,831 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020417831"}]},"ts":"1732020417831"} 2024-11-19T12:46:57,833 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-19T12:46:57,835 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:46:57,837 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 391 msec 2024-11-19T12:47:02,591 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:47:02,593 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:02,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:02,611 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:02,612 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:02,621 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-19T12:47:03,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:47:03,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:47:03,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T12:47:03,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-19T12:47:03,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:47:03,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:47:07,509 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=32943 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:47:07,510 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-19T12:47:07,510 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-19T12:47:07,516 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T12:47:07,516 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:07,534 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:07,537 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:07,538 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:07,538 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:07,538 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:47:07,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7461e1e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:07,539 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2bcd68b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:07,642 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47557d13{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-38271-hadoop-hdfs-3_4_1-tests_jar-_-any-2541608118828962468/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:07,643 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@364e0d85{HTTP/1.1, (http/1.1)}{localhost:38271} 2024-11-19T12:47:07,643 INFO [Time-limited test {}] server.Server(415): Started @121340ms 2024-11-19T12:47:07,644 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:47:07,681 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:07,684 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:07,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:07,685 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:07,685 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:47:07,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3b8dc18b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:07,686 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5516c6f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:07,815 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78a4f6f8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-43259-hadoop-hdfs-3_4_1-tests_jar-_-any-5184160814554029047/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:07,815 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@33d24da8{HTTP/1.1, (http/1.1)}{localhost:43259} 2024-11-19T12:47:07,815 INFO [Time-limited test {}] server.Server(415): Started @121513ms 2024-11-19T12:47:07,816 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:47:07,855 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:07,859 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:07,860 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:07,860 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:07,860 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:47:07,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@740bf9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:07,863 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ae570a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:07,986 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1d4c7e0a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-40159-hadoop-hdfs-3_4_1-tests_jar-_-any-725871771966100383/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:07,986 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7c46b2b7{HTTP/1.1, (http/1.1)}{localhost:40159} 2024-11-19T12:47:07,986 INFO [Time-limited test {}] server.Server(415): Started @121684ms 2024-11-19T12:47:07,987 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:47:08,488 WARN [Thread-864 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,488 WARN [Thread-863 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,505 WARN [Thread-807 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:47:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f86d6b06d0ccdd5 with lease ID 0x5e64b62b73df0671: Processing first storage report for DS-3040ff47-9a5c-46c7-989a-d458c315c660 from datanode DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f86d6b06d0ccdd5 with lease ID 0x5e64b62b73df0671: from storage DS-3040ff47-9a5c-46c7-989a-d458c315c660 node DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x6f86d6b06d0ccdd5 with lease ID 0x5e64b62b73df0671: Processing first storage report for DS-26dd5711-5654-4c4b-8680-160c0724332b from datanode DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,509 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x6f86d6b06d0ccdd5 with lease ID 0x5e64b62b73df0671: from storage DS-26dd5711-5654-4c4b-8680-160c0724332b node DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,749 WARN [Thread-877 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data7/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,749 WARN [Thread-878 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data8/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,779 WARN [Thread-829 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:47:08,781 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67dd71cdca0a04f6 with lease ID 0x5e64b62b73df0672: Processing first storage report for DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd from datanode DatanodeRegistration(127.0.0.1:36929, datanodeUuid=3debe517-4014-456e-bf6e-4dde5c443d2f, infoPort=43269, infoSecurePort=0, ipcPort=35373, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67dd71cdca0a04f6 with lease ID 0x5e64b62b73df0672: from storage DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd node DatanodeRegistration(127.0.0.1:36929, datanodeUuid=3debe517-4014-456e-bf6e-4dde5c443d2f, infoPort=43269, infoSecurePort=0, ipcPort=35373, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x67dd71cdca0a04f6 with lease ID 0x5e64b62b73df0672: Processing first storage report for DS-1bee4e59-c6aa-40d9-a668-072df7a31136 from datanode DatanodeRegistration(127.0.0.1:36929, datanodeUuid=3debe517-4014-456e-bf6e-4dde5c443d2f, infoPort=43269, infoSecurePort=0, ipcPort=35373, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,782 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x67dd71cdca0a04f6 with lease ID 0x5e64b62b73df0672: from storage DS-1bee4e59-c6aa-40d9-a668-072df7a31136 node DatanodeRegistration(127.0.0.1:36929, datanodeUuid=3debe517-4014-456e-bf6e-4dde5c443d2f, infoPort=43269, infoSecurePort=0, ipcPort=35373, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,860 WARN [Thread-888 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data9/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,860 WARN [Thread-889 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data10/current/BP-212595389-172.17.0.2-1732020413628/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:08,878 WARN [Thread-852 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:47:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ba5e7d963495427 with lease ID 0x5e64b62b73df0673: Processing first storage report for DS-4e0e02f3-66e5-48ac-82f0-883129507c11 from datanode DatanodeRegistration(127.0.0.1:37729, datanodeUuid=232d1253-279c-4cb5-b6c4-68ae8e309994, infoPort=36177, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ba5e7d963495427 with lease ID 0x5e64b62b73df0673: from storage DS-4e0e02f3-66e5-48ac-82f0-883129507c11 node DatanodeRegistration(127.0.0.1:37729, datanodeUuid=232d1253-279c-4cb5-b6c4-68ae8e309994, infoPort=36177, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x4ba5e7d963495427 with lease ID 0x5e64b62b73df0673: Processing first storage report for DS-c515659a-b4d0-41c1-90bd-64f54c4b47ea from datanode DatanodeRegistration(127.0.0.1:37729, datanodeUuid=232d1253-279c-4cb5-b6c4-68ae8e309994, infoPort=36177, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628) 2024-11-19T12:47:08,881 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x4ba5e7d963495427 with lease ID 0x5e64b62b73df0673: from storage DS-c515659a-b4d0-41c1-90bd-64f54c4b47ea node DatanodeRegistration(127.0.0.1:37729, datanodeUuid=232d1253-279c-4cb5-b6c4-68ae8e309994, infoPort=36177, infoSecurePort=0, ipcPort=40443, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:08,924 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,924 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:08,924 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 java.io.IOException: Bad response ERROR for BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 from datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,925 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 block BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:08,925 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 block BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:08,925 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta block BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:08,925 WARN [PacketResponder: BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36863] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,927 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,927 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 block BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:08,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:56828 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40473:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56828 dst: /127.0.0.1:40473 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,927 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:56814 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40473:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56814 dst: /127.0.0.1:40473 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,928 WARN [PacketResponder: BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36863] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,928 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:60732 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60732 dst: /127.0.0.1:36863 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,928 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:56788 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40473:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56788 dst: /127.0.0.1:40473 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,928 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:60740 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60740 dst: /127.0.0.1:36863 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:08,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_770947390_22 at /127.0.0.1:60780 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60780 dst: /127.0.0.1:36863 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,929 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_770947390_22 at /127.0.0.1:56872 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40473:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56872 dst: /127.0.0.1:40473 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:08,930 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:60710 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36863:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60710 dst: /127.0.0.1:36863 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:08,931 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@16656ed0{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:08,932 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7dac475c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:08,932 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:08,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@781a23c4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:08,932 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@34bdf3a1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:08,933 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:08,933 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 564cbb18-661f-470f-a689-c2b0f9d799ea) service to localhost/127.0.0.1:42615 2024-11-19T12:47:08,934 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:08,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:08,935 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:08,935 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:47:08,935 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:08,936 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 block BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,936 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta block BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,936 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 block BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@20e45d77{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:08,941 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@371f43b7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:08,942 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:08,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@45408de{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:08,942 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 block BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,942 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1998e8d0{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:08,943 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:08,943 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:47:08,943 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 35cdafb0-7b4e-40f9-bc8e-146aa436c13c) service to localhost/127.0.0.1:42615 2024-11-19T12:47:08,943 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:08,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data1/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:08,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data2/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:08,944 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:08,948 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57., hostname=aba5a916dfea,43765,1732020415622, seqNum=2] 2024-11-19T12:47:08,949 ERROR [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0-prefix:aba5a916dfea,43765,1732020415622 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,949 WARN [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0-prefix:aba5a916dfea,43765,1732020415622 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,950 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C43765%2C1732020415622:(num 1732020416679) roll requested 2024-11-19T12:47:08,950 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020428950 2024-11-19T12:47:08,956 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:08,956 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:08,956 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:08,956 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:08,956 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:08,957 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 2024-11-19T12:47:08,957 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:08,957 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:08,958 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-19T12:47:08,958 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-19T12:47:08,958 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 2024-11-19T12:47:08,961 WARN [IPC Server handler 0 on default port 42615 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-19T12:47:08,965 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 after 5ms 2024-11-19T12:47:08,967 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36177:36177),(127.0.0.1/127.0.0.1:43269:43269)] 2024-11-19T12:47:08,967 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time 2024-11-19T12:47:09,408 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:09,741 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:10,967 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:10,969 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 2024-11-19T12:47:10,970 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:10,970 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 block BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:10,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:50348 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:37729:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:50348 dst: /127.0.0.1:37729 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:10,971 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:54746 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:36929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54746 dst: /127.0.0.1:36929 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:11,017 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1d4c7e0a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:11,018 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7c46b2b7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:11,018 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:11,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ae570a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:11,018 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@740bf9ab{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:11,019 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:11,019 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:47:11,019 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 232d1253-279c-4cb5-b6c4-68ae8e309994) service to localhost/127.0.0.1:42615 2024-11-19T12:47:11,019 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:11,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data9/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:11,020 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data10/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:11,020 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:11,409 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:11,742 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:12,966 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 after 4008ms 2024-11-19T12:47:12,968 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]] 2024-11-19T12:47:12,968 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:12,969 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C43765%2C1732020415622:(num 1732020428950) roll requested 2024-11-19T12:47:12,969 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020432969 2024-11-19T12:47:12,973 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:12,974 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:12,974 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741839_1021 2024-11-19T12:47:12,977 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:12,981 WARN [Thread-908 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741840_1022 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:12,982 WARN [Thread-908 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 
2024-11-19T12:47:12,982 WARN [Thread-908 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741840_1022 2024-11-19T12:47:12,982 WARN [Thread-908 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:12,987 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:12,987 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:12,987 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:12,987 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:12,988 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:12,988 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020432969 2024-11-19T12:47:12,989 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42621:42621),(127.0.0.1/127.0.0.1:43269:43269)] 2024-11-19T12:47:12,989 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time 2024-11-19T12:47:12,989 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 is not closed yet, will try archiving it next time 2024-11-19T12:47:12,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36929 is added to blk_1073741838_1020 (size=2431) 2024-11-19T12:47:13,026 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-19T12:47:13,390 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time 2024-11-19T12:47:13,409 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:13,742 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:14,989 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,031 WARN [ResponseProcessor for block BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023 java.io.IOException: Bad response ERROR for BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023 from datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,031 WARN [DataStreamer for file /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020432969 block BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:15,031 WARN [PacketResponder: BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36929] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] 
at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53644 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53644 dst: /127.0.0.1:46381 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,032 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53608 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1023] {}] datanode.DataXceiver(331): 127.0.0.1:36929:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53608 dst: /127.0.0.1:36929 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,067 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78a4f6f8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:15,068 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@33d24da8{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:15,068 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:15,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5516c6f2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:15,068 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3b8dc18b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:15,070 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:15,070 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 3debe517-4014-456e-bf6e-4dde5c443d2f) service to localhost/127.0.0.1:42615 2024-11-19T12:47:15,070 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command 
processor encountered interrupt and exit. 2024-11-19T12:47:15,071 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:15,071 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data7/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:15,072 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data8/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:15,072 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:15,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:15,082 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:47:15,104 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/5f1fdfa4b242410690aecb2ddd30f3b3 is 1080, key is row0002/info:/1732020431022/Put/seqid=0 2024-11-19T12:47:15,106 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,107 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 
2024-11-19T12:47:15,107 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741842_1025 2024-11-19T12:47:15,107 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:15,109 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,109 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:15,109 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741843_1026 2024-11-19T12:47:15,110 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:15,112 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40473 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:15,112 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53664 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027 to mirror 127.0.0.1:40473 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,112 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad. 2024-11-19T12:47:15,113 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027 2024-11-19T12:47:15,113 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53664 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:15,113 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53664 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53664 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,113 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK] 2024-11-19T12:47:15,116 WARN [Thread-917 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741845_1028 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36863 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53672 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028 to mirror 127.0.0.1:36863 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:15,116 WARN [Thread-917 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:15,116 WARN [Thread-917 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028 2024-11-19T12:47:15,116 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53672 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028] {}] datanode.BlockReceiver(316): Block 1073741845 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:15,116 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53672 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741845_1028] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53672 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:15,117 WARN [Thread-917 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:15,118 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:15,118 WARN [IPC Server handler 2 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:15,118 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:15,122 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741846_1029 (size=10347) 2024-11-19T12:47:15,409 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:15,523 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/5f1fdfa4b242410690aecb2ddd30f3b3 2024-11-19T12:47:15,524 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741846_1029 to 127.0.0.1:37729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:15,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/5f1fdfa4b242410690aecb2ddd30f3b3 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3 2024-11-19T12:47:15,540 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3, entries=5, sequenceid=11, filesize=10.1 K 2024-11-19T12:47:15,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 8de1681dda30e951b1c1e9986747cc57 in 459ms, sequenceid=11, compaction requested=false 2024-11-19T12:47:15,541 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:15,712 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:15,712 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-19T12:47:15,717 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/63696e5b6095400fbd481dee6557d720 is 1080, key is row0007/info:/1732020435084/Put/seqid=0 2024-11-19T12:47:15,718 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:15,718 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad.
2024-11-19T12:47:15,719 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741847_1030
2024-11-19T12:47:15,719 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]
2024-11-19T12:47:15,721 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:15,721 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad.
2024-11-19T12:47:15,721 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741848_1031
2024-11-19T12:47:15,721 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]
2024-11-19T12:47:15,723 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:15,723 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad.
2024-11-19T12:47:15,723 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741849_1032
2024-11-19T12:47:15,724 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]
2024-11-19T12:47:15,726 WARN [Thread-927 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741850_1033 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37729
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:15,726 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53698 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033 to mirror 127.0.0.1:37729 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:15,727 WARN [Thread-927 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad.
2024-11-19T12:47:15,727 WARN [Thread-927 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033
2024-11-19T12:47:15,727 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53698 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033] {}] datanode.BlockReceiver(316): Block 1073741850 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-19T12:47:15,727 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53698 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741850_1033] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53698 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:15,727 WARN [Thread-927 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]
2024-11-19T12:47:15,728 WARN [IPC Server handler 4 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-19T12:47:15,728 WARN [IPC Server handler 4 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-19T12:47:15,729 WARN [IPC Server handler 4 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-19T12:47:15,732 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741851_1034 (size=12506)
2024-11-19T12:47:15,743 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,133 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/63696e5b6095400fbd481dee6557d720
2024-11-19T12:47:16,142 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/63696e5b6095400fbd481dee6557d720 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720
2024-11-19T12:47:16,149 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720, entries=7, sequenceid=24, filesize=12.2 K
2024-11-19T12:47:16,151 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 8de1681dda30e951b1c1e9986747cc57 in 438ms, sequenceid=24, compaction requested=false
2024-11-19T12:47:16,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57:
2024-11-19T12:47:16,151 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K
2024-11-19T12:47:16,151 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T12:47:16,151 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720 because midkey is the same as first or last row
2024-11-19T12:47:16,989 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]
2024-11-19T12:47:16,990 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,990 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C43765%2C1732020415622:(num 1732020432969) roll requested
2024-11-19T12:47:16,990 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020436990
2024-11-19T12:47:16,993 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,993 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad.
2024-11-19T12:47:16,993 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741852_1035
2024-11-19T12:47:16,994 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]
2024-11-19T12:47:16,994 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,995 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad.
2024-11-19T12:47:16,995 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741853_1036
2024-11-19T12:47:16,995 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]
2024-11-19T12:47:16,996 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,996 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad.
2024-11-19T12:47:16,996 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741854_1037
2024-11-19T12:47:16,997 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]
2024-11-19T12:47:16,998 WARN [Thread-932 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741855_1038 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:16,998 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53726 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:16,998 WARN [Thread-932 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad.
2024-11-19T12:47:16,999 WARN [Thread-932 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038
2024-11-19T12:47:16,999 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53726 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038] {}] datanode.BlockReceiver(316): Block 1073741855 has not released the reserved bytes. Releasing 268435456 bytes as part of close.
2024-11-19T12:47:16,999 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53726 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741855_1038] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53726 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:16,999 WARN [Thread-932 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]
2024-11-19T12:47:17,000 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-19T12:47:17,000 WARN [IPC Server handler 3 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-19T12:47:17,000 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-19T12:47:17,002 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T12:47:17,002 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T12:47:17,003 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T12:47:17,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T12:47:17,003 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted
2024-11-19T12:47:17,003 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020432969 with entries=25, filesize=25.38 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020436990
2024-11-19T12:47:17,004 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42621:42621)]
2024-11-19T12:47:17,004 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time
2024-11-19T12:47:17,004 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020432969 is not closed yet, will try archiving it next time
2024-11-19T12:47:17,004 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020428950 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C43765%2C1732020415622.1732020428950
2024-11-19T12:47:17,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741841_1024 (size=25992)
2024-11-19T12:47:17,133 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57
2024-11-19T12:47:17,133 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB
2024-11-19T12:47:17,142 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/c5bde2e51ae84695ae47c6c968a7f1b8 is 1079, key is tmprow/info:/1732020437131/Put/seqid=0
2024-11-19T12:47:17,144 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,144 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad.
2024-11-19T12:47:17,144 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741857_1040
2024-11-19T12:47:17,145 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]
2024-11-19T12:47:17,146 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,146 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad.
2024-11-19T12:47:17,146 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741858_1041
2024-11-19T12:47:17,147 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]
2024-11-19T12:47:17,148 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,148 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad.
2024-11-19T12:47:17,148 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741859_1042
2024-11-19T12:47:17,149 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]
2024-11-19T12:47:17,151 WARN [Thread-937 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741860_1043 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36863
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53748 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043 to mirror 127.0.0.1:36863 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:17,151 WARN [Thread-937 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad.
2024-11-19T12:47:17,151 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53748 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043] {}] datanode.BlockReceiver(316): Block 1073741860 has not released the reserved bytes. Releasing 134217728 bytes as part of close.
2024-11-19T12:47:17,151 WARN [Thread-937 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043
2024-11-19T12:47:17,151 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53748 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741860_1043] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53748 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
2024-11-19T12:47:17,152 WARN [Thread-937 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]
2024-11-19T12:47:17,153 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology
2024-11-19T12:47:17,153 WARN [IPC Server handler 2 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]})
2024-11-19T12:47:17,153 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}
2024-11-19T12:47:17,156 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741861_1044 (size=6027)
2024-11-19T12:47:17,406 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time
2024-11-19T12:47:17,410 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting...
    at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,557 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/c5bde2e51ae84695ae47c6c968a7f1b8
2024-11-19T12:47:17,564 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/c5bde2e51ae84695ae47c6c968a7f1b8 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8
2024-11-19T12:47:17,570 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8, entries=1, sequenceid=34, filesize=5.9 K
2024-11-19T12:47:17,572 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8de1681dda30e951b1c1e9986747cc57 in 438ms, sequenceid=34, compaction requested=true
2024-11-19T12:47:17,572 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57:
2024-11-19T12:47:17,572 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K
2024-11-19T12:47:17,572 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1
2024-11-19T12:47:17,572 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720 because midkey is the same as first or last row
2024-11-19T12:47:17,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8de1681dda30e951b1c1e9986747cc57:info, priority=-2147483648, current under compaction store size is 1
2024-11-19T12:47:17,573 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-19T12:47:17,573 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-19T12:47:17,575 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-19T12:47:17,575 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1541): 8de1681dda30e951b1c1e9986747cc57/info is initiating minor compaction (all files)
2024-11-19T12:47:17,575 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8de1681dda30e951b1c1e9986747cc57/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.
2024-11-19T12:47:17,575 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8] into tmpdir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp, totalSize=28.2 K
2024-11-19T12:47:17,576 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5f1fdfa4b242410690aecb2ddd30f3b3, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732020431022
2024-11-19T12:47:17,577 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting 63696e5b6095400fbd481dee6557d720, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732020435084
2024-11-19T12:47:17,577 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting c5bde2e51ae84695ae47c6c968a7f1b8, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732020437131
2024-11-19T12:47:17,593 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8de1681dda30e951b1c1e9986747cc57#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-19T12:47:17,594 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/464cefbe6da642908d5d239031b7699d is 1080, key is row0002/info:/1732020431022/Put/seqid=0
2024-11-19T12:47:17,596 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,596 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad.
2024-11-19T12:47:17,596 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741862_1045
2024-11-19T12:47:17,597 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]
2024-11-19T12:47:17,598 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,598 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad.
2024-11-19T12:47:17,598 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741863_1046
2024-11-19T12:47:17,599 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]
2024-11-19T12:47:17,601 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,601 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad.
2024-11-19T12:47:17,601 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741864_1047
2024-11-19T12:47:17,602 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]
2024-11-19T12:47:17,605 WARN [Thread-944 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741865_1048 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37729
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?]
2024-11-19T12:47:17,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53788 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048 to mirror 127.0.0.1:37729 java.net.ConnectException: Connection refused
    at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?]
    at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?]
    at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?]
    at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?]
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:17,605 WARN [Thread-944 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:17,605 WARN [Thread-944 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048 2024-11-19T12:47:17,605 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53788 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048] {}] datanode.BlockReceiver(316): Block 1073741865 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:17,605 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53788 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741865_1048] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53788 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:17,606 WARN [Thread-944 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:17,607 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:17,607 WARN [IPC Server handler 3 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:17,607 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:17,612 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741866_1049 (size=17994) 2024-11-19T12:47:17,622 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/464cefbe6da642908d5d239031b7699d as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d 2024-11-19T12:47:17,629 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8de1681dda30e951b1c1e9986747cc57/info of 8de1681dda30e951b1c1e9986747cc57 into 464cefbe6da642908d5d239031b7699d(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:47:17,629 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:17,629 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57., storeName=8de1681dda30e951b1c1e9986747cc57/info, priority=13, startTime=1732020437572; duration=0sec 2024-11-19T12:47:17,629 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T12:47:17,629 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:17,629 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d because midkey is the same as first or last row 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d because midkey is the same as first or last row 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d because midkey is the same as first or last row 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:47:17,630 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8de1681dda30e951b1c1e9986747cc57:info 2024-11-19T12:47:17,743 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:18,510 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741851_1034 to 127.0.0.1:37729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:18,511 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4be71710[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741841_1024 to 127.0.0.1:37729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:18,553 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:18,554 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T12:47:18,560 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/6db9bf16aff543bbb244b7bbad49353f is 1079, key is tmprow/info:/1732020438552/Put/seqid=0 2024-11-19T12:47:18,562 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:18,562 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad. 2024-11-19T12:47:18,562 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741867_1050 2024-11-19T12:47:18,563 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK] 2024-11-19T12:47:18,564 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:18,564 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:18,564 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741868_1051 2024-11-19T12:47:18,565 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:18,567 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:18,567 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53816 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052 to mirror 127.0.0.1:37729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:18,567 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:18,567 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052 2024-11-19T12:47:18,567 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53816 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052] {}] datanode.BlockReceiver(316): Block 1073741869 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:18,567 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53816 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741869_1052] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53816 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:18,568 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:18,569 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741870_1053 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:18,570 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741870_1053 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:18,570 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741870_1053 2024-11-19T12:47:18,571 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:18,571 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:18,571 WARN [IPC Server handler 2 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:18,572 WARN [IPC Server handler 2 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:18,576 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741871_1054 (size=6027) 2024-11-19T12:47:18,976 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/6db9bf16aff543bbb244b7bbad49353f 2024-11-19T12:47:18,985 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/6db9bf16aff543bbb244b7bbad49353f as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f 2024-11-19T12:47:18,993 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f, entries=1, sequenceid=45, filesize=5.9 K 2024-11-19T12:47:18,994 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8de1681dda30e951b1c1e9986747cc57 in 441ms, sequenceid=45, compaction requested=false 2024-11-19T12:47:18,994 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-19T12:47:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:18,995 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d because midkey is the same as first or last row 2024-11-19T12:47:19,004 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]] 2024-11-19T12:47:19,005 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,005 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C43765%2C1732020415622:(num 1732020436990) roll requested 2024-11-19T12:47:19,005 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020439005 2024-11-19T12:47:19,008 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,008 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:19,008 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741872_1055 2024-11-19T12:47:19,009 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:19,010 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,010 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad. 2024-11-19T12:47:19,011 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741873_1056 2024-11-19T12:47:19,011 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK] 2024-11-19T12:47:19,013 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,013 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:19,013 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741874_1057 2024-11-19T12:47:19,014 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:19,016 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741875_1058 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,016 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741875_1058 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 
2024-11-19T12:47:19,016 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741875_1058 2024-11-19T12:47:19,017 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:19,017 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:19,017 WARN [IPC Server handler 3 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:19,017 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:19,020 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:19,020 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:19,020 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:19,021 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:19,021 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:19,021 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020436990 with entries=15, filesize=13.26 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020439005 2024-11-19T12:47:19,022 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42621:42621)] 2024-11-19T12:47:19,022 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time 2024-11-19T12:47:19,022 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020436990 is not closed yet, will try archiving it next time 2024-11-19T12:47:19,022 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020432969 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C43765%2C1732020415622.1732020432969 2024-11-19T12:47:19,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741856_1039 (size=13591) 2024-11-19T12:47:19,410 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,424 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 is not closed yet, will try archiving it next time 2024-11-19T12:47:19,511 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741866_1049 to 127.0.0.1:36863 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:19,511 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4be71710[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741861_1044 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:19,744 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,973 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:19,974 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T12:47:19,978 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/359a3364dcf5440ab2dd04a6960bcb3e is 1079, key is tmprow/info:/1732020439972/Put/seqid=0 2024-11-19T12:47:19,980 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:19,981 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:19,981 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741877_1060 2024-11-19T12:47:19,981 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:19,983 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,983 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad. 2024-11-19T12:47:19,983 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741878_1061 2024-11-19T12:47:19,984 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK] 2024-11-19T12:47:19,986 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:19,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53852 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062 to mirror 127.0.0.1:37729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:19,986 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:19,986 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53852 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062] {}] datanode.BlockReceiver(316): Block 1073741879 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:19,986 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062 2024-11-19T12:47:19,986 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53852 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741879_1062] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53852 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:19,987 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:19,989 WARN [Thread-959 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741880_1063 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:19,989 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53858 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:19,989 WARN [Thread-959 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:19,989 WARN [Thread-959 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063 2024-11-19T12:47:19,989 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53858 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063] {}] datanode.BlockReceiver(316): Block 1073741880 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:19,990 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53858 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741880_1063] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53858 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:19,990 WARN [Thread-959 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:19,991 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:19,991 WARN [IPC Server handler 3 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:19,991 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:19,994 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741881_1064 (size=6027) 2024-11-19T12:47:20,395 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/359a3364dcf5440ab2dd04a6960bcb3e 2024-11-19T12:47:20,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/359a3364dcf5440ab2dd04a6960bcb3e as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e 2024-11-19T12:47:20,407 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e, entries=1, sequenceid=55, filesize=5.9 K 2024-11-19T12:47:20,408 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 8de1681dda30e951b1c1e9986747cc57 in 435ms, sequenceid=55, compaction requested=true 2024-11-19T12:47:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split 
because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-19T12:47:20,408 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d because midkey is the same as first or last row 2024-11-19T12:47:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 8de1681dda30e951b1c1e9986747cc57:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:47:20,409 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:47:20,409 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:47:20,410 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:47:20,410 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1541): 8de1681dda30e951b1c1e9986747cc57/info is initiating minor compaction (all files) 2024-11-19T12:47:20,410 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 8de1681dda30e951b1c1e9986747cc57/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 
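The flush and split-policy records above reduce to a single comparison plus a veto: the store's files total 29.3 K against a 16.0 K check size, so the policy wants to split, but the split is then refused because the candidate midkey equals the first or last row (splitting there would leave one daughter empty). A tiny sketch of that decision using only the numbers printed in the log; the helper names and row keys are illustrative, not HBase's actual ConstantSizeRegionSplitPolicy / StoreUtils code:

```java
// Illustrative sketch of the split decision logged above; not HBase's real
// policy classes. The sizes come from the log, the row keys are made up.
public final class SplitDecisionSketch {

    /** "Should split because region size is big enough sumSize=..., sizeToCheck=..." */
    static boolean sizeSaysSplit(long sumSizeBytes, long sizeToCheckBytes) {
        return sumSizeBytes > sizeToCheckBytes;
    }

    /** "cannot split ... because midkey is the same as first or last row" */
    static boolean midkeyUsable(String firstRow, String midkey, String lastRow) {
        return !midkey.equals(firstRow) && !midkey.equals(lastRow);
    }

    public static void main(String[] args) {
        long sumSize = 30_048;     // ~29.3 K: the three store files selected for compaction below
        long sizeToCheck = 16_384; // ~16.0 K: the check size configured for this test
        System.out.println("size says split: " + sizeSaysSplit(sumSize, sizeToCheck)); // true

        // Veto case: the midkey of the dominant file equals its first row, so no key
        // would put data on both sides of the split.
        System.out.println("midkey usable: " + midkeyUsable("row0002", "row0002", "row0099")); // false
    }
}
```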
2024-11-19T12:47:20,410 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e] into tmpdir=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp, totalSize=29.3 K 2024-11-19T12:47:20,411 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting 464cefbe6da642908d5d239031b7699d, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732020431022 2024-11-19T12:47:20,411 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6db9bf16aff543bbb244b7bbad49353f, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732020438552 2024-11-19T12:47:20,412 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] compactions.Compactor(225): Compacting 359a3364dcf5440ab2dd04a6960bcb3e, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732020439972 2024-11-19T12:47:20,428 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 8de1681dda30e951b1c1e9986747cc57#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:47:20,428 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/d5e7eb9fee8341c88f709a15e0b317a6 is 1080, key is row0002/info:/1732020431022/Put/seqid=0 2024-11-19T12:47:20,430 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:20,431 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:20,431 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741882_1065 2024-11-19T12:47:20,431 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:20,432 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:20,433 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]) is bad. 2024-11-19T12:47:20,433 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741883_1066 2024-11-19T12:47:20,433 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36863,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK] 2024-11-19T12:47:20,435 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:20,435 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]) is bad. 2024-11-19T12:47:20,435 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741884_1067 2024-11-19T12:47:20,435 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK] 2024-11-19T12:47:20,438 WARN [Thread-965 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741885_1068 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:20,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53862 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:20,438 WARN [Thread-965 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:20,438 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53862 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068] {}] datanode.BlockReceiver(316): Block 1073741885 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:20,438 WARN [Thread-965 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068 2024-11-19T12:47:20,438 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:53862 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741885_1068] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:53862 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:20,439 WARN [Thread-965 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:20,439 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-19T12:47:20,439 WARN [IPC Server handler 3 on default port 42615 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-19T12:47:20,440 WARN [IPC Server handler 3 on default port 42615 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-19T12:47:20,442 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741886_1069 (size=18097) 2024-11-19T12:47:20,851 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/d5e7eb9fee8341c88f709a15e0b317a6 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/d5e7eb9fee8341c88f709a15e0b317a6 2024-11-19T12:47:20,858 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 8de1681dda30e951b1c1e9986747cc57/info of 8de1681dda30e951b1c1e9986747cc57 into d5e7eb9fee8341c88f709a15e0b317a6(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
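The compaction bookkeeping in the records above checks out by hand: the three selected HFiles (17.6 K + 5.9 K + 5.9 K) account for the 29.3 K / 30048-byte totalSize, the 14 cells they hold (keycounts 12 + 1 + 1, biggest cell 1080 bytes) bound the raw payload at roughly 15 K, and the rewritten file lands at 17.7 K (18097 bytes) because the per-file overhead of the two single-cell flush files is folded away. A short back-of-the-envelope check; every figure is copied from the log, the overhead interpretation is a reading of it, not HBase code:

```java
// Back-of-the-envelope check of the compaction numbers logged above.
// All inputs are copied from the log lines; nothing here is HBase code.
public final class CompactionMathSketch {
    public static void main(String[] args) {
        double bigFileKib = 17.6;    // 464cefbe..., keycount=12
        double smallFlushKib = 5.9;  // 6db9bf16... and 359a3364..., keycount=1 each
        double selectedKib = bigFileKib + 2 * smallFlushKib;
        System.out.printf("selected input ~= %.1f K (log: 29.3 K, 30048 bytes)%n", selectedKib);

        int cells = 12 + 1 + 1;      // keycounts of the three input files
        int biggestCellBytes = 1080; // "Len of the biggest cell ... is 1080"
        double payloadUpperKib = cells * biggestCellBytes / 1024.0;
        System.out.printf("cell payload <= %.1f K; rewritten file is 17.7 K (18097 bytes),%n"
                + "so most of the 29.3 K -> 17.7 K shrink is per-file overhead of the tiny flushes%n",
                payloadUpperKib);
    }
}
```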
2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:20,859 INFO [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57., storeName=8de1681dda30e951b1c1e9986747cc57/info, priority=13, startTime=1732020440409; duration=0sec 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/d5e7eb9fee8341c88f709a15e0b317a6 because midkey is the same as first or last row 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/d5e7eb9fee8341c88f709a15e0b317a6 because midkey is the same as first or last row 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/d5e7eb9fee8341c88f709a15e0b317a6 because midkey is the same as first or last row 2024-11-19T12:47:20,859 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:47:20,860 DEBUG [RS:0;aba5a916dfea:43765-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 8de1681dda30e951b1c1e9986747cc57:info 2024-11-19T12:47:21,022 WARN [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 
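The FSHLog(539) warning above marks the point where the roller has requested so many back-to-back rolls, each one failing for the same low-replication reason, that it concludes there simply are not enough live datanodes to satisfy the WAL's replica requirement. A rough, hypothetical sketch of that kind of consecutive-failure guard (the threshold, names, and behaviour are assumptions for illustration, not HBase's actual FSHLog implementation):

```java
// Hypothetical consecutive-roll-request guard, in the spirit of the FSHLog(539)
// warning above. Threshold and method names are assumed, not HBase code.
public final class RollRequestGuardSketch {
    private static final int MAX_CONSECUTIVE_ROLLS = 5; // assumed limit

    private int consecutiveLowReplicationRolls = 0;
    private boolean keepRequestingRolls = true;

    /** Called when a sync notices the current writer pipeline is under-replicated. */
    void onLowReplication() {
        if (!keepRequestingRolls) {
            return; // give up: rolling again cannot help while datanodes are down
        }
        consecutiveLowReplicationRolls++;
        if (consecutiveLowReplicationRolls > MAX_CONSECUTIVE_ROLLS) {
            System.out.println("Too many consecutive RollWriter requests, it's a sign of "
                    + "the total number of live datanodes is lower than the tolerable replicas.");
            keepRequestingRolls = false;
            return;
        }
        System.out.println("requesting WAL roll #" + consecutiveLowReplicationRolls);
    }

    /** Called after a roll produced a writer with a healthy pipeline. */
    void onHealthyRoll() {
        consecutiveLowReplicationRolls = 0;
        keepRequestingRolls = true;
    }

    public static void main(String[] args) {
        RollRequestGuardSketch guard = new RollRequestGuardSketch();
        for (int i = 0; i < 8; i++) {
            guard.onLowReplication(); // every attempt fails while the datanodes are down
        }
    }
}
```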
2024-11-19T12:47:21,022 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:21,197 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:21,200 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:21,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:21,201 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:21,201 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:47:21,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@79b8e13d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:21,202 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@fe3ec18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:21,299 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43e17015{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/java.io.tmpdir/jetty-localhost-40609-hadoop-hdfs-3_4_1-tests_jar-_-any-10740636157427159242/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:21,300 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4decd880{HTTP/1.1, (http/1.1)}{localhost:40609} 2024-11-19T12:47:21,300 INFO [Time-limited test {}] server.Server(415): Started @134998ms 2024-11-19T12:47:21,301 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:47:21,411 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:21,510 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4be71710[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741856_1039 to 127.0.0.1:40473 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:21,510 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741871_1054 to 127.0.0.1:36863 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:21,744 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:21,853 WARN [Thread-984 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:47:21,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x118e8f2705d6213f with lease ID 0x5e64b62b73df0674: from storage DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef node DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:47:21,861 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x118e8f2705d6213f with lease ID 0x5e64b62b73df0674: from storage DS-31d3546d-867b-4637-be0c-f8ec157cc6a5 node DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:22,511 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@4be71710[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741886_1069 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:22,511 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741881_1064 to 127.0.0.1:37729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:23,023 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:23,411 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:23,745 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:25,023 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:25,411 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:25,456 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:47:25,745 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,401 ERROR [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData-prefix:aba5a916dfea,32943,1732020415485 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,402 WARN [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData-prefix:aba5a916dfea,32943,1732020415485 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,402 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C32943%2C1732020415485:(num 1732020416173) roll requested 2024-11-19T12:47:26,402 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C32943%2C1732020415485.1732020446402 2024-11-19T12:47:26,406 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:26,406 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:59888 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4]'}, localName='127.0.0.1:45199', datanodeUuid='564cbb18-661f-470f-a689-c2b0f9d799ea', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:26,406 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45199,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:26,407 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070 2024-11-19T12:47:26,407 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:59888 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070] {}] datanode.BlockReceiver(316): Block 1073741887 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T12:47:26,407 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:59888 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741887_1070] {}] datanode.DataXceiver(331): 127.0.0.1:45199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59888 dst: /127.0.0.1:45199 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:26,408 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:26,411 WARN [Thread-1004 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741888_1071 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:37729 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,411 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:33160 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071 to mirror 127.0.0.1:37729 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:26,412 WARN [Thread-1004 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK]) is bad. 2024-11-19T12:47:26,412 WARN [Thread-1004 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071 2024-11-19T12:47:26,412 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:33160 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071] {}] datanode.BlockReceiver(316): Block 1073741888 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-19T12:47:26,412 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:33160 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741888_1071] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:33160 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:26,412 WARN [Thread-1004 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:37729,DS-4e0e02f3-66e5-48ac-82f0-883129507c11,DISK] 2024-11-19T12:47:26,416 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:26,417 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:26,417 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:26,417 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:26,417 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:26,417 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 with entries=54, filesize=26.68 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020446402 2024-11-19T12:47:26,417 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,418 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:26,418 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 2024-11-19T12:47:26,418 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:42621:42621)] 2024-11-19T12:47:26,418 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 is not closed yet, will try archiving it next time 2024-11-19T12:47:26,418 WARN [IPC Server handler 1 on default port 42615 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 has not been closed. Lease recovery is in progress. RecoveryId = 1073 for block blk_1073741830_1006 2024-11-19T12:47:26,419 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 after 1ms 2024-11-19T12:47:27,024 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:27,412 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:29,024 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:29,412 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:30,421 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 after 4003ms 2024-11-19T12:47:30,859 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@381fde50[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741836_1012 to 127.0.0.1:37729 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:30,860 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2d24fbec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741832_1008 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:31,025 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:31,413 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:47:31,860 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:47:31,873 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@4d0b04a3 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40473,null,null]) java.net.ConnectException: Call From aba5a916dfea/172.17.0.2 to localhost:43633 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T12:47:31,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741833_1019 (size=455) 2024-11-19T12:47:31,987 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020416679 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C43765%2C1732020415622.1732020416679 2024-11-19T12:47:31,988 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020436990 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C43765%2C1732020415622.1732020436990 2024-11-19T12:47:33,025 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:33,413 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:33,858 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2d24fbec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741833_1019 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:33,861 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:47:34,870 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.1732020454869 2024-11-19T12:47:34,879 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:34,879 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:34,880 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:34,880 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:34,880 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:34,880 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020439005 with entries=13, filesize=12.60 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020454869 2024-11-19T12:47:34,881 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39453:39453),(127.0.0.1/127.0.0.1:42621:42621)] 2024-11-19T12:47:34,881 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020439005 is not closed yet, will try archiving it next time 2024-11-19T12:47:34,882 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741876_1059 (size=12911) 2024-11-19T12:47:34,885 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43765 {}] regionserver.HRegion(8855): Flush requested on 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:34,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-19T12:47:34,890 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/096e694f0c244f27852d33bd6eb99928 is 1080, key is row0013/info:/1732020454882/Put/seqid=0 2024-11-19T12:47:34,893 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:59966 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4]'}, localName='127.0.0.1:45199', datanodeUuid='564cbb18-661f-470f-a689-c2b0f9d799ea', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:34,893 WARN [Thread-1027 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741891_1075 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:34,894 WARN [Thread-1027 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45199,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:34,894 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:59966 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075] {}] datanode.BlockReceiver(316): Block 1073741891 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:34,894 WARN [Thread-1027 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075 2024-11-19T12:47:34,894 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1312898632_22 at /127.0.0.1:59966 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741891_1075] {}] datanode.DataXceiver(331): 127.0.0.1:45199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59966 dst: /127.0.0.1:45199 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:34,894 WARN [Thread-1027 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:34,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741892_1076 (size=8190) 2024-11-19T12:47:34,900 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741892_1076 (size=8190) 2024-11-19T12:47:34,901 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/096e694f0c244f27852d33bd6eb99928 2024-11-19T12:47:34,912 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/096e694f0c244f27852d33bd6eb99928 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/096e694f0c244f27852d33bd6eb99928 2024-11-19T12:47:34,918 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/096e694f0c244f27852d33bd6eb99928, entries=3, sequenceid=66, filesize=8.0 K 2024-11-19T12:47:34,920 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7527, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9683 for 8de1681dda30e951b1c1e9986747cc57 in 34ms, sequenceid=66, compaction requested=false 2024-11-19T12:47:34,920 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 8de1681dda30e951b1c1e9986747cc57: 2024-11-19T12:47:34,920 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=25.7 K, sizeToCheck=16.0 K 2024-11-19T12:47:34,920 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:47:34,920 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/d5e7eb9fee8341c88f709a15e0b317a6 because midkey is the same as first or last row 2024-11-19T12:47:35,025 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 
2024-11-19T12:47:35,026 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,108 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:47:35,108 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T12:47:35,109 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at 
org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:47:35,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:35,109 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:35,109 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:47:35,110 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:47:35,110 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=855045927, stopped=false 2024-11-19T12:47:35,111 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,32943,1732020415485 2024-11-19T12:47:35,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:47:35,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:47:35,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:47:35,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:35,172 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:35,173 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:35,173 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:47:35,173 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:47:35,174 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:47:35,174 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on 
znode that does not yet exist, /hbase/running 2024-11-19T12:47:35,174 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:35,174 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:47:35,174 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,43765,1732020415622' ***** 2024-11-19T12:47:35,174 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:47:35,175 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:47:35,175 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:47:35,175 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,42905,1732020417310' ***** 2024-11-19T12:47:35,175 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:47:35,175 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:47:35,175 INFO [RS:0;aba5a916dfea:43765 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:47:35,176 INFO [RS:0;aba5a916dfea:43765 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:47:35,176 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:47:35,176 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(3091): Received CLOSE for 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:35,176 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:47:35,176 INFO [RS:1;aba5a916dfea:42905 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:47:35,176 INFO [RS:1;aba5a916dfea:42905 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:47:35,176 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,42905,1732020417310 2024-11-19T12:47:35,176 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,43765,1732020415622 2024-11-19T12:47:35,176 INFO [RS:1;aba5a916dfea:42905 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:47:35,177 INFO [RS:1;aba5a916dfea:42905 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;aba5a916dfea:42905. 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:43765. 
2024-11-19T12:47:35,177 DEBUG [RS:1;aba5a916dfea:42905 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:47:35,177 DEBUG [RS:1;aba5a916dfea:42905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:35,177 DEBUG [RS:0;aba5a916dfea:43765 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:47:35,177 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 8de1681dda30e951b1c1e9986747cc57, disabling compactions & flushes 2024-11-19T12:47:35,177 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,42905,1732020417310; all regions closed. 
2024-11-19T12:47:35,177 DEBUG [RS:0;aba5a916dfea:43765 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:35,177 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:47:35,177 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:47:35,177 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. after waiting 0 ms 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:47:35,177 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:35,177 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,177 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:47:35,178 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,178 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 8de1681dda30e951b1c1e9986747cc57 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-19T12:47:35,178 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,178 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,178 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,178 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T12:47:35,178 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 8de1681dda30e951b1c1e9986747cc57=TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.} 2024-11-19T12:47:35,178 DEBUG [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 8de1681dda30e951b1c1e9986747cc57 2024-11-19T12:47:35,179 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:47:35,179 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:47:35,179 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,179 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:47:35,179 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:47:35,179 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,179 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:47:35,179 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 2024-11-19T12:47:35,179 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-19T12:47:35,179 WARN [IPC Server handler 1 on default port 42615 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 has not been closed. Lease recovery is in progress. RecoveryId = 1077 for block blk_1073741837_1013 2024-11-19T12:47:35,179 ERROR [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0-prefix:aba5a916dfea,43765,1732020415622.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,179 WARN [FSHLog-0-hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0-prefix:aba5a916dfea,43765,1732020415622.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,180 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 after 1ms 2024-11-19T12:47:35,180 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C43765%2C1732020415622.meta:.meta(num 1732020417071) roll requested 2024-11-19T12:47:35,180 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43765%2C1732020415622.meta.1732020455180.meta 2024-11-19T12:47:35,183 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/48ffecb89f074dde8ada92eb504405d7 is 1080, key is row0015/info:/1732020454887/Put/seqid=0 2024-11-19T12:47:35,184 WARN [Thread-1037 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741894_1079 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:35,185 WARN [Thread-1037 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741894_1079 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:35,185 WARN [Thread-1037 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741894_1079 2024-11-19T12:47:35,185 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,185 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,185 WARN [Thread-1037 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:35,185 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,185 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,185 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,186 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020455180.meta 2024-11-19T12:47:35,186 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,186 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40473,DS-2b6b5844-d4c0-4dae-a817-c7510f6cafad,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:35,186 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta 2024-11-19T12:47:35,187 WARN [IPC Server handler 1 on default port 42615 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741834_1010 2024-11-19T12:47:35,187 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta after 1ms 2024-11-19T12:47:35,190 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42621:42621),(127.0.0.1/127.0.0.1:39453:39453)] 2024-11-19T12:47:35,190 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta is not closed yet, will try archiving it next time 2024-11-19T12:47:35,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741895_1080 (size=14660) 2024-11-19T12:47:35,196 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741895_1080 (size=14660) 2024-11-19T12:47:35,196 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=78 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/48ffecb89f074dde8ada92eb504405d7 2024-11-19T12:47:35,205 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/.tmp/info/48ffecb89f074dde8ada92eb504405d7 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/48ffecb89f074dde8ada92eb504405d7 2024-11-19T12:47:35,211 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/info/36dc0ff264374bcd882964bbf6cf5167 is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57./info:regioninfo/1732020417817/Put/seqid=0 2024-11-19T12:47:35,211 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/48ffecb89f074dde8ada92eb504405d7, entries=9, sequenceid=78, filesize=14.3 K 2024-11-19T12:47:35,212 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8de1681dda30e951b1c1e9986747cc57 in 35ms, sequenceid=78, compaction requested=true 2024-11-19T12:47:35,213 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e] to archive 2024-11-19T12:47:35,214 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
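[editor's note] The StoreCloser entries above and below list six compacted store files being moved from the region's data directory to the matching path under archive/. The following is a hedged, minimal sketch of that path mapping with the Hadoop FileSystem API; the class and method names are invented, and the assumption that the archive layout simply mirrors data/ under an archive/ root comes only from the paths visible in this log, not from HBase's HFileArchiver itself.

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Hedged sketch: mirror a compacted store file from <root>/data/... to <root>/archive/data/... */
public class ArchiveSketch {
  static Path archivePathFor(Path rootDir, Path storeFile) {
    // Relative part of the store file under the HBase root dir, e.g.
    // data/default/<table>/<region>/info/<hfile>
    String relative = storeFile.toUri().getPath()
        .substring(rootDir.toUri().getPath().length() + 1);
    return new Path(new Path(rootDir, "archive"), relative);
  }

  static void archive(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
    Path target = archivePathFor(rootDir, storeFile);
    fs.mkdirs(target.getParent());          // ensure archive/<...>/info exists
    if (!fs.rename(storeFile, target)) {    // a rename keeps the block data in place
      throw new IOException("Failed to archive " + storeFile + " to " + target);
    }
  }
}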
2024-11-19T12:47:35,216 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/5f1fdfa4b242410690aecb2ddd30f3b3 2024-11-19T12:47:35,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741896_1082 (size=7089) 2024-11-19T12:47:35,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741896_1082 (size=7089) 2024-11-19T12:47:35,218 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/info/36dc0ff264374bcd882964bbf6cf5167 2024-11-19T12:47:35,218 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/63696e5b6095400fbd481dee6557d720 2024-11-19T12:47:35,220 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/464cefbe6da642908d5d239031b7699d 2024-11-19T12:47:35,221 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/c5bde2e51ae84695ae47c6c968a7f1b8 2024-11-19T12:47:35,223 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/6db9bf16aff543bbb244b7bbad49353f 2024-11-19T12:47:35,224 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/info/359a3364dcf5440ab2dd04a6960bcb3e 2024-11-19T12:47:35,224 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=aba5a916dfea:32943 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-19T12:47:35,225 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5f1fdfa4b242410690aecb2ddd30f3b3=10347, 63696e5b6095400fbd481dee6557d720=12506, 464cefbe6da642908d5d239031b7699d=17994, c5bde2e51ae84695ae47c6c968a7f1b8=6027, 6db9bf16aff543bbb244b7bbad49353f=6027, 359a3364dcf5440ab2dd04a6960bcb3e=6027] 2024-11-19T12:47:35,235 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/default/TestLogRolling-testLogRollOnDatanodeDeath/8de1681dda30e951b1c1e9986747cc57/recovered.edits/81.seqid, newMaxSeqId=81, maxSeqId=1 2024-11-19T12:47:35,236 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:35,236 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 8de1681dda30e951b1c1e9986747cc57: Waiting for close lock at 1732020455177Running coprocessor pre-close hooks at 1732020455177Disabling compacts and flushes for region at 1732020455177Disabling writes for close at 1732020455177Obtaining lock to block concurrent updates at 1732020455178 (+1 ms)Preparing flush snapshotting stores in 8de1681dda30e951b1c1e9986747cc57 at 1732020455178Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57., syncing WAL and waiting on mvcc, flushsize=dataSize=9683, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732020455178Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. at 1732020455179 (+1 ms)Flushing 8de1681dda30e951b1c1e9986747cc57/info: creating writer at 1732020455179Flushing 8de1681dda30e951b1c1e9986747cc57/info: appending metadata at 1732020455183 (+4 ms)Flushing 8de1681dda30e951b1c1e9986747cc57/info: closing flushed file at 1732020455183Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@300cea07: reopening flushed file at 1732020455204 (+21 ms)Finished flush of dataSize ~9.46 KB/9683, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 8de1681dda30e951b1c1e9986747cc57 in 35ms, sequenceid=78, compaction requested=true at 1732020455212 (+8 ms)Writing region close event to WAL at 1732020455225 (+13 ms)Running coprocessor post-close hooks at 1732020455236 (+11 ms)Closed at 1732020455236 2024-11-19T12:47:35,237 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732020417443.8de1681dda30e951b1c1e9986747cc57. 2024-11-19T12:47:35,246 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/ns/418cf93001184675976ad9e972eba3b4 is 43, key is default/ns:d/1732020417171/Put/seqid=0 2024-11-19T12:47:35,248 WARN [Thread-1054 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741897_1083 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,248 WARN [Thread-1054 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741897_1083 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:35,248 WARN [Thread-1054 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741897_1083 2024-11-19T12:47:35,248 WARN [Thread-1054 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:35,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741898_1084 (size=5153) 2024-11-19T12:47:35,253 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741898_1084 (size=5153) 2024-11-19T12:47:35,254 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/ns/418cf93001184675976ad9e972eba3b4 2024-11-19T12:47:35,276 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/table/a1231f45b96649bbbb61111d63b6135b is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732020417831/Put/seqid=0 2024-11-19T12:47:35,278 WARN [Thread-1061 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741899_1085 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:35,278 WARN [Thread-1061 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741899_1085 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:35,278 WARN [Thread-1061 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741899_1085 2024-11-19T12:47:35,278 WARN [Thread-1061 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:35,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741900_1086 (size=5424) 2024-11-19T12:47:35,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741900_1086 (size=5424) 2024-11-19T12:47:35,282 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.1732020439005 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C43765%2C1732020415622.1732020439005 2024-11-19T12:47:35,283 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/table/a1231f45b96649bbbb61111d63b6135b 2024-11-19T12:47:35,290 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/info/36dc0ff264374bcd882964bbf6cf5167 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/info/36dc0ff264374bcd882964bbf6cf5167 2024-11-19T12:47:35,297 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/info/36dc0ff264374bcd882964bbf6cf5167, entries=10, sequenceid=11, filesize=6.9 K 2024-11-19T12:47:35,298 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/ns/418cf93001184675976ad9e972eba3b4 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/ns/418cf93001184675976ad9e972eba3b4 2024-11-19T12:47:35,304 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/ns/418cf93001184675976ad9e972eba3b4, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:47:35,306 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/.tmp/table/a1231f45b96649bbbb61111d63b6135b as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/table/a1231f45b96649bbbb61111d63b6135b 2024-11-19T12:47:35,312 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/table/a1231f45b96649bbbb61111d63b6135b, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T12:47:35,314 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false 2024-11-19T12:47:35,321 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:47:35,322 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:47:35,322 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:47:35,322 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020455178Running coprocessor pre-close hooks at 1732020455178Disabling compacts and flushes for region at 1732020455178Disabling writes for close at 1732020455179 (+1 ms)Obtaining lock to block concurrent updates at 1732020455179Preparing flush snapshotting stores in 1588230740 at 1732020455179Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732020455179Flushing stores of hbase:meta,,1.1588230740 at 1732020455191 (+12 ms)Flushing 1588230740/info: creating writer at 1732020455191Flushing 1588230740/info: appending metadata at 1732020455211 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732020455211Flushing 1588230740/ns: creating writer at 1732020455224 (+13 ms)Flushing 1588230740/ns: appending metadata at 1732020455245 (+21 ms)Flushing 1588230740/ns: closing flushed file at 1732020455245Flushing 1588230740/table: creating writer at 1732020455262 (+17 ms)Flushing 1588230740/table: appending metadata at 1732020455276 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732020455276Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4b09f3e5: reopening flushed file at 1732020455289 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@456d59a4: reopening flushed file at 1732020455297 (+8 
ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73968848: reopening flushed file at 1732020455305 (+8 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 134ms, sequenceid=11, compaction requested=false at 1732020455314 (+9 ms)Writing region close event to WAL at 1732020455317 (+3 ms)Running coprocessor post-close hooks at 1732020455322 (+5 ms)Closed at 1732020455322 2024-11-19T12:47:35,322 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:47:35,378 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,43765,1732020415622; all regions closed. 2024-11-19T12:47:35,379 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,379 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,379 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,379 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,379 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:35,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741893_1078 (size=825) 2024-11-19T12:47:35,382 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741893_1078 (size=825) 2024-11-19T12:47:35,409 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:47:35,413 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:47:35,413 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:47:35,544 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:47:35,544 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:47:36,512 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@3f9ebff3[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:46381, datanodeUuid=131d3895-95b2-4992-b905-55d32681ea7f, infoPort=42621, infoSecurePort=0, ipcPort=35735, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741876_1059 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:36,545 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:47:36,858 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@2d24fbec[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741831_1007 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:36,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741835_1011 (size=393) 2024-11-19T12:47:37,537 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T12:47:37,537 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T12:47:37,858 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@381fde50[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:45199, datanodeUuid=564cbb18-661f-470f-a689-c2b0f9d799ea, infoPort=39453, infoSecurePort=0, ipcPort=35679, storageInfo=lv=-57;cid=testClusterID;nsid=998836896;c=1732020413628):Failed to transfer BP-212595389-172.17.0.2-1732020413628:blk_1073741829_1005 to 127.0.0.1:36929 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:37,859 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:47:39,181 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 after 4002ms 2024-11-19T12:47:39,188 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta after 4002ms 2024-11-19T12:47:40,179 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T12:47:40,182 DEBUG [RS:1;aba5a916dfea:42905 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs 2024-11-19T12:47:40,182 INFO [RS:1;aba5a916dfea:42905 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C42905%2C1732020417310:(num 1732020417545) 2024-11-19T12:47:40,182 DEBUG [RS:1;aba5a916dfea:42905 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:40,182 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:47:40,182 INFO [RS:1;aba5a916dfea:42905 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:47:40,183 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
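[editor's note] The RecoverLeaseFSUtils entries show lease recovery on the old WAL files being retried (attempt=0 after 1ms earlier, attempt=1 after 4002ms here) until the NameNode finishes releasing the dead writer's lease. Below is a minimal sketch of that retry pattern using DistributedFileSystem.recoverLease; the class name, attempt bound, and sleep intervals are illustrative, not HBase's actual values.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Hedged sketch of lease-recovery retry, in the spirit of RecoverLeaseFSUtils. */
public class LeaseRecoverySketch {
  static void recoverLease(DistributedFileSystem dfs, Path walFile)
      throws IOException, InterruptedException {
    long start = System.currentTimeMillis();
    for (int attempt = 0; attempt < 10; attempt++) {      // illustrative bound
      if (dfs.recoverLease(walFile)) {                     // true once the lease is released
        return;
      }
      System.out.printf("Failed to recover lease, attempt=%d on file=%s after %dms%n",
          attempt, walFile, System.currentTimeMillis() - start);
      Thread.sleep(attempt == 0 ? 1000L : 4000L);          // back off between probes
    }
    throw new IOException("Could not recover lease on " + walFile);
  }
}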
2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:47:40,183 INFO [RS:1;aba5a916dfea:42905 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42905 2024-11-19T12:47:40,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,42905,1732020417310 2024-11-19T12:47:40,238 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:47:40,238 INFO [RS:1;aba5a916dfea:42905 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:47:40,240 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,250 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,42905,1732020417310] 2024-11-19T12:47:40,258 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,42905,1732020417310 already deleted, retry=false 2024-11-19T12:47:40,258 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,42905,1732020417310 expired; onlineServers=1 2024-11-19T12:47:40,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,262 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,263 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:40,350 INFO [RS:1;aba5a916dfea:42905 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:47:40,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:42905-0x101546bbea90002, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:40,350 INFO [RS:1;aba5a916dfea:42905 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,42905,1732020417310; zookeeper connection closed. 2024-11-19T12:47:40,351 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@684f677d {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@684f677d 2024-11-19T12:47:40,380 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-19T12:47:40,384 DEBUG [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs 2024-11-19T12:47:40,384 INFO [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C43765%2C1732020415622.meta:.meta(num 1732020455180) 2024-11-19T12:47:40,384 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,384 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,385 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,385 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,385 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,387 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741890_1074 (size=14682) 2024-11-19T12:47:40,388 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741890_1074 (size=14682) 2024-11-19T12:47:40,390 DEBUG [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs 2024-11-19T12:47:40,390 INFO [RS:0;aba5a916dfea:43765 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C43765%2C1732020415622:(num 1732020454869) 2024-11-19T12:47:40,390 DEBUG [RS:0;aba5a916dfea:43765 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:40,390 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:47:40,390 INFO [RS:0;aba5a916dfea:43765 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:47:40,391 INFO [RS:0;aba5a916dfea:43765 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:47:40,391 INFO [RS:0;aba5a916dfea:43765 {}] 
hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:47:40,391 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:47:40,391 INFO [RS:0;aba5a916dfea:43765 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43765 2024-11-19T12:47:40,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,43765,1732020415622 2024-11-19T12:47:40,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:47:40,417 INFO [RS:0;aba5a916dfea:43765 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:47:40,425 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,43765,1732020415622] 2024-11-19T12:47:40,433 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,43765,1732020415622 already deleted, retry=false 2024-11-19T12:47:40,433 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,43765,1732020415622 expired; onlineServers=0 2024-11-19T12:47:40,433 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,32943,1732020415485' ***** 2024-11-19T12:47:40,433 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:47:40,434 INFO [M:0;aba5a916dfea:32943 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:47:40,434 INFO [M:0;aba5a916dfea:32943 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:47:40,434 DEBUG [M:0;aba5a916dfea:32943 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:47:40,434 DEBUG [M:0;aba5a916dfea:32943 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:47:40,434 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020416402 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020416402,5,FailOnTimeoutGroup] 2024-11-19T12:47:40,434 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020416407 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020416407,5,FailOnTimeoutGroup] 2024-11-19T12:47:40,434 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
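[editor's note] The WAL-Shutdown-0 errors above report waiting 5 seconds for the writer close and point at "hbase.wal.fshlog.wait.on.shutdown.seconds" as the knob to raise. A hedged snippet follows showing how that key (quoted verbatim from the log) could be raised in a test or site configuration; the chosen value and class name are only examples.

import org.apache.hadoop.conf.Configuration;

public class WalShutdownWaitSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Give the FSHLog writer longer to close on shutdown while the underlying
    // HDFS pipeline is still recovering; 30 seconds here is purely illustrative.
    conf.setInt("hbase.wal.fshlog.wait.on.shutdown.seconds", 30);
    System.out.println(conf.get("hbase.wal.fshlog.wait.on.shutdown.seconds"));
  }
}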
2024-11-19T12:47:40,434 INFO [M:0;aba5a916dfea:32943 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:47:40,434 INFO [M:0;aba5a916dfea:32943 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:47:40,434 DEBUG [M:0;aba5a916dfea:32943 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:47:40,435 INFO [M:0;aba5a916dfea:32943 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:47:40,435 INFO [M:0;aba5a916dfea:32943 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:47:40,435 INFO [M:0;aba5a916dfea:32943 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:47:40,435 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:47:40,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:47:40,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:40,442 DEBUG [M:0;aba5a916dfea:32943 {}] zookeeper.ZKUtil(347): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:47:40,442 WARN [M:0;aba5a916dfea:32943 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:47:40,443 INFO [M:0;aba5a916dfea:32943 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/.lastflushedseqids 2024-11-19T12:47:40,447 WARN [Thread-1072 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741901_1087 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:40,447 WARN [Thread-1072 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741901_1087 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:45199,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:40,447 WARN [Thread-1072 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741901_1087 2024-11-19T12:47:40,448 WARN [Thread-1072 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:40,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741902_1088 (size=130) 2024-11-19T12:47:40,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741902_1088 (size=130) 2024-11-19T12:47:40,457 INFO [M:0;aba5a916dfea:32943 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:47:40,457 INFO [M:0;aba5a916dfea:32943 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:47:40,457 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:47:40,457 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:40,458 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:40,458 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:47:40,458 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:40,458 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.26 KB heapSize=29.50 KB 2024-11-19T12:47:40,475 DEBUG [M:0;aba5a916dfea:32943 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17f82d42280f45a6a942f473b6cbcfc6 is 82, key is hbase:meta,,1/info:regioninfo/1732020417110/Put/seqid=0 2024-11-19T12:47:40,477 WARN [Thread-1078 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741903_1089 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:40,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:60046 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4]'}, localName='127.0.0.1:45199', datanodeUuid='564cbb18-661f-470f-a689-c2b0f9d799ea', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:40,477 WARN [Thread-1078 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45199,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:40,477 WARN [Thread-1078 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089 2024-11-19T12:47:40,477 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:60046 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089] {}] datanode.BlockReceiver(316): Block 1073741903 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:40,477 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:60046 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741903_1089] {}] datanode.DataXceiver(331): 127.0.0.1:45199:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60046 dst: /127.0.0.1:45199 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:40,478 WARN [Thread-1078 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741904_1090 (size=5672) 2024-11-19T12:47:40,482 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741904_1090 (size=5672) 2024-11-19T12:47:40,483 INFO [M:0;aba5a916dfea:32943 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17f82d42280f45a6a942f473b6cbcfc6 2024-11-19T12:47:40,504 DEBUG [M:0;aba5a916dfea:32943 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5694d56578c946fd9fc2b4349b6b715c is 775, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020417836/Put/seqid=0 2024-11-19T12:47:40,507 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54184 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:40,507 WARN [Thread-1086 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741905_1091 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:40,508 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54184 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091] {}] datanode.BlockReceiver(316): Block 1073741905 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:40,508 WARN [Thread-1086 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:40,508 WARN [Thread-1086 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091 2024-11-19T12:47:40,508 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54184 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741905_1091] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54184 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:40,508 WARN [Thread-1086 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:40,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741906_1092 (size=6256) 2024-11-19T12:47:40,512 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741906_1092 (size=6256) 2024-11-19T12:47:40,513 INFO [M:0;aba5a916dfea:32943 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.59 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5694d56578c946fd9fc2b4349b6b715c 2024-11-19T12:47:40,518 INFO [M:0;aba5a916dfea:32943 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5694d56578c946fd9fc2b4349b6b715c 2024-11-19T12:47:40,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:40,525 INFO [RS:0;aba5a916dfea:43765 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:47:40,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43765-0x101546bbea90001, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:40,525 INFO [RS:0;aba5a916dfea:43765 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,43765,1732020415622; zookeeper connection closed. 2024-11-19T12:47:40,525 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@2a231aa3 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@2a231aa3 2024-11-19T12:47:40,526 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-19T12:47:40,531 DEBUG [M:0;aba5a916dfea:32943 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/537e43c428324a1ebeeb15d66b1e3242 is 69, key is aba5a916dfea,42905,1732020417310/rs:state/1732020417388/Put/seqid=0 2024-11-19T12:47:40,533 WARN [Thread-1093 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741907_1093 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36929 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:40,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54198 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6]'}, localName='127.0.0.1:46381', datanodeUuid='131d3895-95b2-4992-b905-55d32681ea7f', xmitsInProgress=0}:Exception transferring block BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093 to mirror 127.0.0.1:36929 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:40,533 WARN [Thread-1093 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093 in pipeline [DatanodeInfoWithStorage[127.0.0.1:46381,DS-3040ff47-9a5c-46c7-989a-d458c315c660,DISK], DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 2024-11-19T12:47:40,533 WARN [Thread-1093 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093 2024-11-19T12:47:40,533 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54198 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093] {}] datanode.BlockReceiver(316): Block 1073741907 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-19T12:47:40,533 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-106272601_22 at /127.0.0.1:54198 [Receiving block BP-212595389-172.17.0.2-1732020413628:blk_1073741907_1093] {}] datanode.DataXceiver(331): 127.0.0.1:46381:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54198 dst: /127.0.0.1:46381 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:40,534 WARN [Thread-1093 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:40,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741908_1094 (size=5224) 2024-11-19T12:47:40,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741908_1094 (size=5224) 2024-11-19T12:47:40,538 INFO [M:0;aba5a916dfea:32943 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/537e43c428324a1ebeeb15d66b1e3242 2024-11-19T12:47:40,561 DEBUG [M:0;aba5a916dfea:32943 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ba9e1407f3b4785adb2d6f7db965fc8 is 52, key is load_balancer_on/state:d/1732020417293/Put/seqid=0 2024-11-19T12:47:40,563 WARN [Thread-1101 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741909_1095 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:40,563 WARN [Thread-1101 {}] hdfs.DataStreamer(1731): Error Recovery for BP-212595389-172.17.0.2-1732020413628:blk_1073741909_1095 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK], DatanodeInfoWithStorage[127.0.0.1:45199,DS-fab1e5ac-2c87-4fa0-98a3-880ccddb73ef,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK]) is bad. 
2024-11-19T12:47:40,563 WARN [Thread-1101 {}] hdfs.DataStreamer(1850): Abandoning BP-212595389-172.17.0.2-1732020413628:blk_1073741909_1095 2024-11-19T12:47:40,563 WARN [Thread-1101 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36929,DS-8bc011f2-0d22-4683-b0cc-699b1522c3cd,DISK] 2024-11-19T12:47:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741910_1096 (size=5056) 2024-11-19T12:47:40,567 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741910_1096 (size=5056) 2024-11-19T12:47:40,568 INFO [M:0;aba5a916dfea:32943 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ba9e1407f3b4785adb2d6f7db965fc8 2024-11-19T12:47:40,574 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/17f82d42280f45a6a942f473b6cbcfc6 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17f82d42280f45a6a942f473b6cbcfc6 2024-11-19T12:47:40,580 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/17f82d42280f45a6a942f473b6cbcfc6, entries=8, sequenceid=60, filesize=5.5 K 2024-11-19T12:47:40,581 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/5694d56578c946fd9fc2b4349b6b715c as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5694d56578c946fd9fc2b4349b6b715c 2024-11-19T12:47:40,586 INFO [M:0;aba5a916dfea:32943 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 5694d56578c946fd9fc2b4349b6b715c 2024-11-19T12:47:40,586 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/5694d56578c946fd9fc2b4349b6b715c, entries=6, sequenceid=60, filesize=6.1 K 2024-11-19T12:47:40,587 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/537e43c428324a1ebeeb15d66b1e3242 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/537e43c428324a1ebeeb15d66b1e3242 2024-11-19T12:47:40,592 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/537e43c428324a1ebeeb15d66b1e3242, entries=2, sequenceid=60, filesize=5.1 K 2024-11-19T12:47:40,593 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/4ba9e1407f3b4785adb2d6f7db965fc8 as hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4ba9e1407f3b4785adb2d6f7db965fc8 2024-11-19T12:47:40,598 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/4ba9e1407f3b4785adb2d6f7db965fc8, entries=1, sequenceid=60, filesize=4.9 K 2024-11-19T12:47:40,600 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false 2024-11-19T12:47:40,602 INFO [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:40,602 DEBUG [M:0;aba5a916dfea:32943 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020460457Disabling compacts and flushes for region at 1732020460457Disabling writes for close at 1732020460458 (+1 ms)Obtaining lock to block concurrent updates at 1732020460458Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020460458Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23817, getHeapSize=30144, getOffHeapSize=0, getCellsCount=71 at 1732020460458Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732020460459 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020460459Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020460474 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020460474Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020460488 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020460504 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020460504Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020460518 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020460530 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020460530Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020460544 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020460560 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020460560Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@74c4e11e: reopening flushed file at 1732020460573 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@174042e5: reopening flushed file at 1732020460580 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f9fb494: reopening flushed file at 1732020460586 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@408467a8: reopening flushed file at 1732020460593 (+7 ms)Finished flush of dataSize ~23.26 KB/23817, heapSize ~29.44 KB/30144, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 142ms, sequenceid=60, compaction requested=false at 1732020460600 (+7 ms)Writing region close event to WAL at 1732020460602 (+2 ms)Closed at 1732020460602 2024-11-19T12:47:40,602 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,602 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,603 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,603 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,603 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:40,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:46381 is added to blk_1073741889_1072 (size=1045) 2024-11-19T12:47:40,605 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45199 is added to blk_1073741889_1072 (size=1045) 2024-11-19T12:47:40,776 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:47:40,797 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,798 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,799 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,803 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:40,805 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:41,182 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:47:41,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:41,877 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@40f2a9e4 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-212595389-172.17.0.2-1732020413628:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40473,null,null]) java.net.ConnectException: Call From aba5a916dfea/172.17.0.2 to localhost:43633 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T12:47:42,183 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:42,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:42,432 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/WALs/aba5a916dfea,32943,1732020415485/aba5a916dfea%2C32943%2C1732020415485.1732020416173 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/oldWALs/aba5a916dfea%2C32943%2C1732020415485.1732020416173 2024-11-19T12:47:42,435 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/MasterData/oldWALs/aba5a916dfea%2C32943%2C1732020415485.1732020416173 to hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/oldWALs/aba5a916dfea%2C32943%2C1732020415485.1732020416173$masterlocalwal$ 2024-11-19T12:47:42,435 INFO [M:0;aba5a916dfea:32943 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:47:42,435 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:47:42,435 INFO [M:0;aba5a916dfea:32943 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:32943 2024-11-19T12:47:42,435 INFO [M:0;aba5a916dfea:32943 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:47:42,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:42,579 INFO [M:0;aba5a916dfea:32943 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:47:42,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:32943-0x101546bbea90000, quorum=127.0.0.1:49346, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:47:42,581 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43e17015{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:42,581 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4decd880{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:42,581 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:42,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@fe3ec18{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:42,582 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@79b8e13d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:42,583 
WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:42,583 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:47:42,583 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 564cbb18-661f-470f-a689-c2b0f9d799ea) service to localhost/127.0.0.1:42615 2024-11-19T12:47:42,583 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:42,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@784cda25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40473,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:43633 , LocalHost:localPort aba5a916dfea/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-19T12:47:42,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@784cda25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-212595389-172.17.0.2-1732020413628:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:45199,null,null], DatanodeInfoWithStorage[127.0.0.1:40473,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: No block pool offer service for bpid=BP-212595389-172.17.0.2-1732020413628 2024-11-19T12:47:42,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@784cda25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40473,null,null]) java.io.IOException: No block pool offer service for bpid=BP-212595389-172.17.0.2-1732020413628 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:42,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@784cda25 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:45199,null,null]) java.io.IOException: No block pool offer service for bpid=BP-212595389-172.17.0.2-1732020413628 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:42,583 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data3/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:42,583 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@784cda25 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40473,null,null], DatanodeInfoWithStorage[127.0.0.1:45199,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-212595389-172.17.0.2-1732020413628:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40473,null,null], DatanodeInfoWithStorage[127.0.0.1:45199,null,null]] 2024-11-19T12:47:42,583 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data4/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:42,584 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:42,585 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47557d13{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:42,585 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@364e0d85{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:42,585 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:42,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2bcd68b8{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:42,586 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped 
o.e.j.s.ServletContextHandler@7461e1e1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:42,587 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:42,587 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:47:42,587 WARN [BP-212595389-172.17.0.2-1732020413628 heartbeating to localhost/127.0.0.1:42615 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-212595389-172.17.0.2-1732020413628 (Datanode Uuid 131d3895-95b2-4992-b905-55d32681ea7f) service to localhost/127.0.0.1:42615 2024-11-19T12:47:42,587 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:42,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data5/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:42,587 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/cluster_76283095-a0d9-9dbc-e85d-36c6fe22b258/data/data6/current/BP-212595389-172.17.0.2-1732020413628 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:42,587 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:42,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@a95d0{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:47:42,592 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1222fb27{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:42,592 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:42,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1cbabe3e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:42,592 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@75b4bf6{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:42,600 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:47:42,629 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:47:42,637 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 
(was 78) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42615 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42615 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f74b4bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42615 from jenkins.hfs.3 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:34279 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42615 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread 
for localhost/127.0.0.1:42615 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:42615 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:34279 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42615 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-14-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42615 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) 
app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$900/0x00007f74b4bef5c0.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:42615 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) 
java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42615 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-14-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-15-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42615 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=450 (was 404) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=186 (was 157) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=6347 (was 7262) 2024-11-19T12:47:42,643 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=186, ProcessCount=11, AvailableMemoryMB=6346 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.log.dir so I do NOT create it in target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/44ca50f7-95c0-9783-5bc4-4ce8262254ce/hadoop.tmp.dir so I do NOT create it in target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2, deleteOnExit=true 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/test.cache.data in system properties and HBase conf 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:47:42,644 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:47:42,644 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:47:42,645 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:47:42,646 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:47:42,658 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:47:42,969 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:42,975 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:42,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:42,976 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:42,976 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:47:42,977 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:42,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@675d0ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:42,977 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5260e8b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:43,071 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43123a6e{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-41829-hadoop-hdfs-3_4_1-tests_jar-_-any-1469279123973106818/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:47:43,071 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@633966fa{HTTP/1.1, (http/1.1)}{localhost:41829} 2024-11-19T12:47:43,072 INFO [Time-limited test {}] server.Server(415): Started @156769ms 2024-11-19T12:47:43,083 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:47:43,184 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:43,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:43,265 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:43,268 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:43,269 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:43,269 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:43,269 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:47:43,270 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3537f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:43,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19254d5f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:43,363 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5bec5d92{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-41433-hadoop-hdfs-3_4_1-tests_jar-_-any-12608332536437448915/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:43,363 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@21998c84{HTTP/1.1, (http/1.1)}{localhost:41433} 2024-11-19T12:47:43,364 INFO [Time-limited test {}] server.Server(415): Started @157062ms 2024-11-19T12:47:43,365 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T12:47:43,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-19T12:47:43,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:47:43,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:47:43,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:47:43,391 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:43,395 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:43,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:43,396 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:43,396 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:47:43,397 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c66348c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:43,398 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5cd8260b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:43,492 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@167013d4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-37359-hadoop-hdfs-3_4_1-tests_jar-_-any-1951707861460621390/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:43,493 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@47718438{HTTP/1.1, (http/1.1)}{localhost:37359} 2024-11-19T12:47:43,493 INFO [Time-limited test {}] server.Server(415): Started @157191ms 2024-11-19T12:47:43,494 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T12:47:44,066 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data1/current/BP-1869456203-172.17.0.2-1732020462662/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:44,066 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data2/current/BP-1869456203-172.17.0.2-1732020462662/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:44,085 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:47:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e9903276834b6c9 with lease ID 0xa351f8c4ab2b9a52: Processing first storage report for DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7 from datanode DatanodeRegistration(127.0.0.1:37255, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=38877, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662) 2024-11-19T12:47:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e9903276834b6c9 with lease ID 0xa351f8c4ab2b9a52: from storage DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7 node DatanodeRegistration(127.0.0.1:37255, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=38877, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x7e9903276834b6c9 with lease ID 0xa351f8c4ab2b9a52: Processing first storage report for DS-e7ca1be6-deb1-4c84-95bf-5342aef250a3 from datanode DatanodeRegistration(127.0.0.1:37255, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=38877, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662) 2024-11-19T12:47:44,087 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7e9903276834b6c9 with lease ID 0xa351f8c4ab2b9a52: from storage DS-e7ca1be6-deb1-4c84-95bf-5342aef250a3 node DatanodeRegistration(127.0.0.1:37255, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=38877, infoSecurePort=0, ipcPort=41511, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:47:44,185 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:44,191 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:44,267 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data3/current/BP-1869456203-172.17.0.2-1732020462662/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:44,267 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data4/current/BP-1869456203-172.17.0.2-1732020462662/current, will proceed with Du for space computation calculation, 2024-11-19T12:47:44,282 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:47:44,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b0d620495a401a7 with lease ID 0xa351f8c4ab2b9a53: Processing first storage report for DS-64fd7608-9993-4752-89a9-a2adf44fe158 from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=41165, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662) 2024-11-19T12:47:44,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b0d620495a401a7 with lease ID 0xa351f8c4ab2b9a53: from storage DS-64fd7608-9993-4752-89a9-a2adf44fe158 node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=41165, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:44,284 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9b0d620495a401a7 with lease ID 0xa351f8c4ab2b9a53: Processing first storage report for DS-ace0dfdb-4beb-4ccb-b677-a0f2c1497cff from datanode DatanodeRegistration(127.0.0.1:34575, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=41165, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662) 2024-11-19T12:47:44,284 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9b0d620495a401a7 with lease ID 0xa351f8c4ab2b9a53: from storage DS-ace0dfdb-4beb-4ccb-b677-a0f2c1497cff node DatanodeRegistration(127.0.0.1:34575, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=41165, infoSecurePort=0, ipcPort=33523, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:44,323 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791 2024-11-19T12:47:44,326 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/zookeeper_0, clientPort=52390, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:47:44,327 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=52390 2024-11-19T12:47:44,327 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:47:44,337 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:47:44,338 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d with version=8 2024-11-19T12:47:44,338 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:47:44,340 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class 
java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:47:44,340 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:47:44,341 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40913 2024-11-19T12:47:44,343 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40913 connecting to ZooKeeper ensemble=127.0.0.1:52390 2024-11-19T12:47:44,391 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:409130x0, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:47:44,392 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40913-0x101546c7d8a0000 connected 2024-11-19T12:47:44,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,456 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,460 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:47:44,460 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d, hbase.cluster.distributed=false 2024-11-19T12:47:44,462 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:47:44,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40913 2024-11-19T12:47:44,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40913 2024-11-19T12:47:44,463 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40913 2024-11-19T12:47:44,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40913 2024-11-19T12:47:44,464 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40913 2024-11-19T12:47:44,478 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with 
queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:47:44,478 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:47:44,479 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35045 2024-11-19T12:47:44,480 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35045 connecting to ZooKeeper ensemble=127.0.0.1:52390 2024-11-19T12:47:44,481 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,483 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:350450x0, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:47:44,494 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:350450x0, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:47:44,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35045-0x101546c7d8a0001 connected 2024-11-19T12:47:44,494 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:47:44,495 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:47:44,496 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:47:44,497 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:47:44,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-19T12:47:44,498 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35045 2024-11-19T12:47:44,498 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35045 2024-11-19T12:47:44,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-19T12:47:44,499 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35045 2024-11-19T12:47:44,512 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:40913 2024-11-19T12:47:44,512 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:47:44,518 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:47:44,519 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:47:44,526 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,527 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:47:44,527 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,40913,1732020464340 from backup master directory 2024-11-19T12:47:44,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:47:44,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:47:44,535 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:47:44,535 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,539 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/hbase.id] with ID: 5445cbae-3ce9-424d-8edd-7ba045da008b 2024-11-19T12:47:44,539 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/.tmp/hbase.id 2024-11-19T12:47:44,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:47:44,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:47:44,546 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/.tmp/hbase.id]:[hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/hbase.id] 2024-11-19T12:47:44,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:44,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:47:44,560 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
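The cluster ID handling above writes hbase.id to a .tmp location first and then moves it to its final path, so a reader never observes a half-written file. A minimal sketch of the same write-then-rename pattern with standard java.nio, illustrative only (not the HBase FSUtils implementation; paths and the class name are invented for this sketch):

    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.StandardCopyOption;

    // Illustrative sketch: publish a small metadata file by writing it to a temporary
    // location and renaming it into place, so readers see either nothing or the whole file.
    final class ClusterIdPublishSketch {
        static void publish(Path dir, String clusterId) throws Exception {
            Path tmp = dir.resolve(".tmp").resolve("hbase.id");
            Path target = dir.resolve("hbase.id");
            Files.createDirectories(tmp.getParent());
            Files.write(tmp, clusterId.getBytes(StandardCharsets.UTF_8));
            Files.move(tmp, target, StandardCopyOption.ATOMIC_MOVE);  // single-step rename
        }
        public static void main(String[] args) throws Exception {
            publish(Files.createTempDirectory("clusterid"), "5445cbae-3ce9-424d-8edd-7ba045da008b");
        }
    }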
2024-11-19T12:47:44,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:47:44,574 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:47:44,575 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:47:44,576 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:47:44,576 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:47:44,582 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:47:44,583 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:47:44,584 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store 2024-11-19T12:47:44,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:47:44,590 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:47:44,591 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:44,591 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:47:44,591 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:44,591 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:44,591 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:47:44,592 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:47:44,592 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
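The master:store descriptor above declares four column families (info, proc, rs, state) with per-family VERSIONS, BLOOMFILTER, IN_MEMORY and BLOCKSIZE settings. For orientation, a comparable per-family configuration for an ordinary user table can be expressed through the public client builders; a minimal sketch, assuming an HBase 2.x+ client on the classpath and using an invented table name:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    // Illustrative sketch: one in-memory, multi-version family with small blocks,
    // plus one single-version family with default settings.
    final class DescriptorSketch {
        static TableDescriptor build() {
            ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)
                .setInMemory(true)
                .setBlocksize(8 * 1024)   // 8 KB blocks, as in the descriptor logged above
                .build();
            ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
                .newBuilder(Bytes.toBytes("proc"))
                .setMaxVersions(1)
                .build();
            return TableDescriptorBuilder
                .newBuilder(TableName.valueOf("example_table"))   // invented table name
                .setColumnFamily(info)
                .setColumnFamily(proc)
                .build();
        }
    }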
2024-11-19T12:47:44,592 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020464591Disabling compacts and flushes for region at 1732020464591Disabling writes for close at 1732020464591Writing region close event to WAL at 1732020464592 (+1 ms)Closed at 1732020464592 2024-11-19T12:47:44,593 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/.initializing 2024-11-19T12:47:44,593 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,595 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40913%2C1732020464340, suffix=, logDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340, archiveDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/oldWALs, maxLogs=10 2024-11-19T12:47:44,596 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40913%2C1732020464340.1732020464596 2024-11-19T12:47:44,600 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 2024-11-19T12:47:44,601 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-19T12:47:44,601 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:47:44,602 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:44,602 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,602 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,606 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:47:44,608 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:44,608 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,609 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:47:44,609 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:47:44,610 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:47:44,611 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:47:44,611 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:47:44,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:47:44,613 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,614 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,614 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,615 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,615 DEBUG [master/aba5a916dfea:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,616 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:47:44,617 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:47:44,623 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:47:44,623 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=809260, jitterRate=0.029028505086898804}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:47:44,624 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020464602Initializing all the Stores at 1732020464603 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020464603Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020464606 (+3 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020464606Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020464606Cleaning up temporary data from old regions at 1732020464615 (+9 ms)Region opened successfully at 1732020464624 (+9 ms) 2024-11-19T12:47:44,624 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:47:44,627 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37f325a1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:47:44,628 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:47:44,628 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:47:44,628 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:47:44,628 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:47:44,629 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:47:44,629 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:47:44,629 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:47:44,631 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:47:44,632 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:47:44,643 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:47:44,644 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:47:44,645 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:47:44,652 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:47:44,652 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:47:44,653 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:47:44,660 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:47:44,661 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:47:44,668 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:47:44,671 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:47:44,676 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:47:44,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:47:44,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:47:44,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,685 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,686 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,40913,1732020464340, sessionid=0x101546c7d8a0000, setting cluster-up flag (Was=false) 2024-11-19T12:47:44,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,702 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,727 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:47:44,728 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,743 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:44,768 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:47:44,770 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40913,1732020464340 2024-11-19T12:47:44,771 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:47:44,773 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:47:44,773 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:47:44,773 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:47:44,773 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,40913,1732020464340 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:47:44,774 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:47:44,775 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020494776 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:47:44,776 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:47:44,777 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:47:44,777 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:47:44,777 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:47:44,778 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020464777,5,FailOnTimeoutGroup] 2024-11-19T12:47:44,778 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020464778,5,FailOnTimeoutGroup] 2024-11-19T12:47:44,778 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,778 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:47:44,778 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,778 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,778 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,778 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:47:44,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:47:44,787 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:47:44,789 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:47:44,789 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d 2024-11-19T12:47:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:47:44,800 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:47:44,801 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:44,801 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(746): ClusterId : 5445cbae-3ce9-424d-8edd-7ba045da008b 2024-11-19T12:47:44,801 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:47:44,802 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:47:44,804 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:47:44,804 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,804 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:44,805 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:47:44,806 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:47:44,806 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:44,807 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:47:44,808 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:47:44,808 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:44,809 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:47:44,810 INFO [StoreOpener-1588230740-1 {}] 
compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:47:44,810 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:44,811 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:47:44,811 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:47:44,811 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:44,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:47:44,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740 2024-11-19T12:47:44,812 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740 2024-11-19T12:47:44,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:47:44,813 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:47:44,814 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
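The FlushLargeStoresPolicy message just above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on the hbase:meta descriptor, so the policy falls back to the memstore flush size divided by the number of families (16.0 M here). A minimal sketch, assuming the standard TableDescriptorBuilder API and a hypothetical table name, of how that bound could be set explicitly on a descriptor; this is illustrative only and not part of the test being logged:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PerFamilyFlushBound {
      public static void main(String[] args) {
        // Hypothetical table; the property key is taken verbatim from the log message above.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of(Bytes.toBytes("info")))
            .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
                      String.valueOf(16L * 1024 * 1024)) // 16 MB lower bound per column family
            .build();
        System.out.println(td);
      }
    }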
2024-11-19T12:47:44,815 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:47:44,817 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:47:44,817 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692000, jitterRate=-0.12007664144039154}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:47:44,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020464801Initializing all the Stores at 1732020464802 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020464802Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020464802Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020464802Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020464802Cleaning up temporary data from old regions at 1732020464813 (+11 ms)Region opened successfully at 1732020464818 (+5 ms) 2024-11-19T12:47:44,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:47:44,818 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:47:44,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:47:44,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:47:44,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:47:44,819 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:47:44,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020464818Disabling compacts and flushes for region at 1732020464818Disabling writes for close at 1732020464818Writing region 
close event to WAL at 1732020464819 (+1 ms)Closed at 1732020464819 2024-11-19T12:47:44,819 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:47:44,820 DEBUG [RS:0;aba5a916dfea:35045 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c4b315c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:47:44,820 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:47:44,820 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:47:44,820 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:47:44,822 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:47:44,823 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:47:44,836 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:35045 2024-11-19T12:47:44,836 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:47:44,836 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:47:44,836 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(832): About to register with Master. 
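The region open journal above reports a SteppingSplitPolicy whose inner ConstantSizeRegionSplitPolicy uses a very small desiredMaxFileSize (about 692 KB after jitter), which is typical of a unit-test configuration rather than a production one. A sketch, assuming the standard keys hbase.regionserver.region.split.policy and hbase.hregion.max.filesize, of how such a setup could be expressed; the concrete values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class TestSplitPolicyConfig {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Split policy matching the one named in the region open journal above.
        conf.set("hbase.regionserver.region.split.policy",
                 "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
        // Deliberately tiny max store file size so a test splits and rolls quickly.
        conf.setLong("hbase.hregion.max.filesize", 786432L);
        System.out.println(conf.get("hbase.regionserver.region.split.policy"));
      }
    }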
2024-11-19T12:47:44,837 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40913,1732020464340 with port=35045, startcode=1732020464477 2024-11-19T12:47:44,838 DEBUG [RS:0;aba5a916dfea:35045 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:47:44,840 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41231, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:47:44,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40913 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40913 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,842 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d 2024-11-19T12:47:44,842 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:33145 2024-11-19T12:47:44,842 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:47:44,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:47:44,852 DEBUG [RS:0;aba5a916dfea:35045 {}] zookeeper.ZKUtil(111): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,852 WARN [RS:0;aba5a916dfea:35045 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:47:44,852 INFO [RS:0;aba5a916dfea:35045 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:47:44,853 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,853 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,35045,1732020464477] 2024-11-19T12:47:44,857 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:47:44,859 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:47:44,859 INFO [RS:0;aba5a916dfea:35045 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:47:44,859 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
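The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low-water mark of 836 M. Those figures are consistent with the default heap fractions (0.4 of the heap for the upper mark, 0.95 of the upper mark for the lower mark) applied to this test JVM's heap, though that derivation is an inference rather than something the log states. A sketch of the corresponding configuration keys:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class MemStoreLimits {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Fraction of the region server heap usable by all memstores (upper water mark).
        conf.setFloat("hbase.regionserver.global.memstore.size", 0.4f);
        // Lower water mark, expressed as a fraction of the upper mark (0.95 * 880 M ~ 836 M).
        conf.setFloat("hbase.regionserver.global.memstore.size.lower.limit", 0.95f);
        System.out.println(conf.getFloat("hbase.regionserver.global.memstore.size", -1f));
      }
    }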
2024-11-19T12:47:44,859 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:47:44,860 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:47:44,860 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:47:44,861 DEBUG [RS:0;aba5a916dfea:35045 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:47:44,862 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T12:47:44,862 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,863 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,863 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,863 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,863 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,35045,1732020464477-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:47:44,879 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:47:44,880 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,35045,1732020464477-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,880 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,880 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.Replication(171): aba5a916dfea,35045,1732020464477 started 2024-11-19T12:47:44,893 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:44,893 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,35045,1732020464477, RpcServer on aba5a916dfea/172.17.0.2:35045, sessionid=0x101546c7d8a0001 2024-11-19T12:47:44,893 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:47:44,893 DEBUG [RS:0;aba5a916dfea:35045 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,893 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,35045,1732020464477' 2024-11-19T12:47:44,893 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,35045,1732020464477 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,35045,1732020464477' 2024-11-19T12:47:44,894 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:47:44,895 DEBUG 
[RS:0;aba5a916dfea:35045 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:47:44,895 DEBUG [RS:0;aba5a916dfea:35045 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:47:44,895 INFO [RS:0;aba5a916dfea:35045 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:47:44,895 INFO [RS:0;aba5a916dfea:35045 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:47:44,973 WARN [aba5a916dfea:40913 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:47:44,999 INFO [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C35045%2C1732020464477, suffix=, logDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477, archiveDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs, maxLogs=32 2024-11-19T12:47:45,001 INFO [RS:0;aba5a916dfea:35045 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:47:45,010 INFO [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:47:45,014 DEBUG [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-19T12:47:45,186 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:45,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:47:45,224 DEBUG [aba5a916dfea:40913 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:47:45,224 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:45,226 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,35045,1732020464477, state=OPENING 2024-11-19T12:47:45,285 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:47:45,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:45,294 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:47:45,296 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:47:45,296 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,35045,1732020464477}] 2024-11-19T12:47:45,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:47:45,296 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:47:45,453 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:47:45,458 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:59295, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:47:45,463 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:47:45,464 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:47:45,465 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C35045%2C1732020464477.meta, suffix=.meta, logDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477, archiveDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs, maxLogs=32 2024-11-19T12:47:45,466 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta 2024-11-19T12:47:45,470 INFO 
[RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta 2024-11-19T12:47:45,471 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:41165:41165),(127.0.0.1/127.0.0.1:38877:38877)] 2024-11-19T12:47:45,473 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:47:45,473 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:47:45,473 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:47:45,474 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T12:47:45,474 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:47:45,474 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:45,474 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:47:45,474 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:47:45,476 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:47:45,477 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:47:45,477 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:45,478 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:47:45,479 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:47:45,479 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:45,480 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:47:45,481 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:47:45,481 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,482 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:45,482 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:47:45,483 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:47:45,483 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,484 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:47:45,484 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:47:45,485 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740 2024-11-19T12:47:45,486 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740 2024-11-19T12:47:45,487 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:47:45,488 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:47:45,488 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
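The repeated CompactionConfiguration lines show the stock compaction tuning for each column family of hbase:meta: minFilesToCompact 3, maxFilesToCompact 10, ratio 1.2, off-peak ratio 5.0. A sketch, assuming the standard hbase.hstore.compaction.* keys, of the settings those numbers correspond to:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuning {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);          // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);         // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);   // file selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f); // off-peak ratio
        System.out.println(conf.getInt("hbase.hstore.compaction.min", -1));
      }
    }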
2024-11-19T12:47:45,490 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:47:45,491 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=692247, jitterRate=-0.11976267397403717}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:47:45,491 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:47:45,491 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020465474Writing region info on filesystem at 1732020465474Initializing all the Stores at 1732020465475 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020465475Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020465475Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020465476 (+1 ms)Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020465476Cleaning up temporary data from old regions at 1732020465488 (+12 ms)Running coprocessor post-open hooks at 1732020465491 (+3 ms)Region opened successfully at 1732020465491 2024-11-19T12:47:45,492 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020465452 2024-11-19T12:47:45,495 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:47:45,495 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:47:45,496 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:45,497 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,35045,1732020464477, state=OPEN 2024-11-19T12:47:45,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:47:45,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:47:45,525 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:45,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:47:45,525 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:47:45,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:47:45,530 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,35045,1732020464477 in 229 msec 2024-11-19T12:47:45,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:47:45,535 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 710 msec 2024-11-19T12:47:45,536 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:47:45,536 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:47:45,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:47:45,539 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,35045,1732020464477, seqNum=-1] 2024-11-19T12:47:45,539 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:47:45,541 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44017, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:47:45,550 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 776 msec 2024-11-19T12:47:45,550 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020465550, completionTime=-1 2024-11-19T12:47:45,551 INFO 
[master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:47:45,551 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:47:45,553 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:47:45,553 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020525553 2024-11-19T12:47:45,553 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020585553 2024-11-19T12:47:45,553 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:40913, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,554 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,556 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.023sec 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
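The PEWorker lines above show the internal registry lookup that resolves the hbase:meta location to aba5a916dfea,35045,1732020464477 before the namespaces are created. A minimal sketch of the equivalent lookup through the public client API, assuming a reachable cluster whose ZooKeeper quorum is provided by hbase-site.xml on the classpath; this code is not taken from the test itself:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.HRegionLocation;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MetaLocationLookup {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             RegionLocator locator = conn.getRegionLocator(TableName.META_TABLE_NAME)) {
          // reload=true forces a fresh lookup instead of using a cached location.
          HRegionLocation loc = locator.getRegionLocation(Bytes.toBytes(""), true);
          System.out.println("hbase:meta is on " + loc.getServerName());
        }
      }
    }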
2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:47:45,558 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:47:45,561 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:47:45,561 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:47:45,561 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40913,1732020464340-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:47:45,602 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@163af664, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:47:45,602 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,40913,-1 for getting cluster id 2024-11-19T12:47:45,602 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:47:45,604 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5445cbae-3ce9-424d-8edd-7ba045da008b' 2024-11-19T12:47:45,604 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:47:45,604 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5445cbae-3ce9-424d-8edd-7ba045da008b" 2024-11-19T12:47:45,604 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3153ac40, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:47:45,604 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,40913,-1] 2024-11-19T12:47:45,605 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:47:45,605 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:47:45,606 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57626, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:47:45,607 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5e8ee736, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:47:45,607 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:47:45,608 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,35045,1732020464477, seqNum=-1] 2024-11-19T12:47:45,608 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:47:45,610 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46870, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:47:45,612 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,40913,1732020464340 2024-11-19T12:47:45,612 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:47:45,615 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:47:45,615 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-19T12:47:45,616 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-19T12:47:45,616 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:47:45,617 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,40913,1732020464340 2024-11-19T12:47:45,617 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@37fdcc12 2024-11-19T12:47:45,617 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:47:45,619 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57640, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:47:45,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T12:47:45,619 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T12:47:45,620 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:47:45,621 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T12:47:45,622 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:47:45,622 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,622 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-19T12:47:45,623 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:47:45,623 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:47:45,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741835_1011 (size=395) 2024-11-19T12:47:45,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741835_1011 (size=395) 2024-11-19T12:47:45,631 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 587eec45fdc62e4cea654210d1c3ce1a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d 2024-11-19T12:47:45,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34575 is added to blk_1073741836_1012 (size=78) 2024-11-19T12:47:45,637 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37255 is added to blk_1073741836_1012 (size=78) 2024-11-19T12:47:45,638 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:45,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 587eec45fdc62e4cea654210d1c3ce1a, disabling compactions & flushes 2024-11-19T12:47:45,638 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. after waiting 0 ms 2024-11-19T12:47:45,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,638 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,638 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 587eec45fdc62e4cea654210d1c3ce1a: Waiting for close lock at 1732020465638Disabling compacts and flushes for region at 1732020465638Disabling writes for close at 1732020465638Writing region close event to WAL at 1732020465638Closed at 1732020465638 2024-11-19T12:47:45,640 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:47:45,640 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732020465640"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020465640"}]},"ts":"1732020465640"} 2024-11-19T12:47:45,643 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T12:47:45,644 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:47:45,644 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020465644"}]},"ts":"1732020465644"} 2024-11-19T12:47:45,646 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-19T12:47:45,646 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=587eec45fdc62e4cea654210d1c3ce1a, ASSIGN}] 2024-11-19T12:47:45,648 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=587eec45fdc62e4cea654210d1c3ce1a, ASSIGN 2024-11-19T12:47:45,649 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=587eec45fdc62e4cea654210d1c3ce1a, ASSIGN; state=OFFLINE, location=aba5a916dfea,35045,1732020464477; forceNewPlan=false, retain=false 2024-11-19T12:47:45,799 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=587eec45fdc62e4cea654210d1c3ce1a, regionState=OPENING, regionLocation=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:45,802 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=587eec45fdc62e4cea654210d1c3ce1a, ASSIGN because future has completed 2024-11-19T12:47:45,803 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 587eec45fdc62e4cea654210d1c3ce1a, server=aba5a916dfea,35045,1732020464477}] 2024-11-19T12:47:45,961 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 
2024-11-19T12:47:45,961 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 587eec45fdc62e4cea654210d1c3ce1a, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:47:45,961 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,961 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:47:45,962 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,962 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,963 INFO [StoreOpener-587eec45fdc62e4cea654210d1c3ce1a-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,965 INFO [StoreOpener-587eec45fdc62e4cea654210d1c3ce1a-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 587eec45fdc62e4cea654210d1c3ce1a columnFamilyName info 2024-11-19T12:47:45,965 DEBUG [StoreOpener-587eec45fdc62e4cea654210d1c3ce1a-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:47:45,965 INFO [StoreOpener-587eec45fdc62e4cea654210d1c3ce1a-1 {}] regionserver.HStore(327): Store=587eec45fdc62e4cea654210d1c3ce1a/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:47:45,966 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,966 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,967 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,967 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,967 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,969 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,972 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:47:45,972 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 587eec45fdc62e4cea654210d1c3ce1a; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=783343, jitterRate=-0.003928840160369873}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:47:45,972 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:47:45,973 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 587eec45fdc62e4cea654210d1c3ce1a: Running coprocessor pre-open hook at 1732020465962Writing region info on filesystem at 1732020465962Initializing all the Stores at 1732020465963 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020465963Cleaning up temporary data from old regions at 1732020465967 (+4 ms)Running coprocessor post-open hooks at 1732020465972 (+5 ms)Region opened successfully at 1732020465973 (+1 ms) 2024-11-19T12:47:45,974 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a., pid=6, masterSystemTime=1732020465956 2024-11-19T12:47:45,976 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,976 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:45,977 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=587eec45fdc62e4cea654210d1c3ce1a, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,35045,1732020464477 2024-11-19T12:47:45,980 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 587eec45fdc62e4cea654210d1c3ce1a, server=aba5a916dfea,35045,1732020464477 because future has completed 2024-11-19T12:47:45,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:47:45,984 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 587eec45fdc62e4cea654210d1c3ce1a, server=aba5a916dfea,35045,1732020464477 in 178 msec 2024-11-19T12:47:45,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:47:45,987 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=587eec45fdc62e4cea654210d1c3ce1a, ASSIGN in 338 msec 2024-11-19T12:47:45,988 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:47:45,988 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020465988"}]},"ts":"1732020465988"} 2024-11-19T12:47:45,990 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-19T12:47:45,991 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:47:45,994 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 372 msec 2024-11-19T12:47:46,187 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:46,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:47,188 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:47,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:48,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:48,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:48,877 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:47:48,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,900 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,901 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,904 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,905 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:48,907 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:47:49,189 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:49,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:50,190 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:50,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:50,857 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:47:50,859 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-19T12:47:51,192 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:51,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:52,193 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:52,198 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:53,194 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:53,199 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:53,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:47:53,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:47:53,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T12:47:53,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-19T12:47:53,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:47:53,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:47:53,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:47:53,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T12:47:54,195 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:54,200 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:47:55,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:55,201 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:55,631 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40913 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:47:55,632 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-19T12:47:55,632 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-19T12:47:55,641 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T12:47:55,641 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:47:55,645 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a., hostname=aba5a916dfea,35045,1732020464477, seqNum=2] 2024-11-19T12:47:56,196 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:56,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:57,197 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:57,202 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:57,648 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:47:57,649 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:57,649 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:47:57,649 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:57,651 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK], DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]) is bad. 2024-11-19T12:47:57,651 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK], DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]) is bad. 2024-11-19T12:47:57,651 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK], DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34575,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]) is bad. 2024-11-19T12:47:57,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:40370 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40370 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:40338 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40338 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,652 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1957110501_22 at /127.0.0.1:40300 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34575:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:40300 dst: /127.0.0.1:34575 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:57,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:54534 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54534 dst: /127.0.0.1:37255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1957110501_22 at /127.0.0.1:54500 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54500 dst: /127.0.0.1:37255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:57,654 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:54528 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:54528 dst: /127.0.0.1:37255 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,694 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@167013d4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:57,695 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@47718438{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:57,695 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:57,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5cd8260b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:57,695 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c66348c{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:57,696 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:57,697 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 7a844064-43d2-45eb-9393-7af3635df0eb) service to localhost/127.0.0.1:33145 2024-11-19T12:47:57,697 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:47:57,697 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:57,697 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data3/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:57,698 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data4/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:57,698 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:57,706 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:57,710 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:57,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:57,711 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:57,711 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:47:57,711 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3a3e1ac9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:57,712 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c7ab51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:57,805 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@9a967db{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-41253-hadoop-hdfs-3_4_1-tests_jar-_-any-12585414804448810423/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:57,805 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@e5554e2{HTTP/1.1, (http/1.1)}{localhost:41253} 2024-11-19T12:47:57,805 INFO [Time-limited test {}] server.Server(415): Started @171503ms 2024-11-19T12:47:57,806 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
2024-11-19T12:47:57,821 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:57,821 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:57,821 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:57,821 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1957110501_22 at /127.0.0.1:57094 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57094 dst: /127.0.0.1:37255 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,821 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:57100 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57100 dst: /127.0.0.1:37255 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:47:57,821 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:57098 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:37255:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:57098 dst: /127.0.0.1:37255 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-19T12:47:57,822 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5bec5d92{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:57,823 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@21998c84{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:47:57,823 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:47:57,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19254d5f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:47:57,823 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3537f29{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:47:57,824 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:47:57,824 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-19T12:47:57,824 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 090351fd-5464-4140-9326-73ec0348ea96) service to localhost/127.0.0.1:33145 2024-11-19T12:47:57,824 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:47:57,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data1/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:57,825 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data2/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:47:57,825 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:47:57,835 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:47:57,839 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:47:57,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:47:57,841 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:47:57,841 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:47:57,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@26c3707{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:47:57,843 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59a2e487{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:47:57,935 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@40b8cc2f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-35873-hadoop-hdfs-3_4_1-tests_jar-_-any-11564514501525886085/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:47:57,935 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@58930156{HTTP/1.1, (http/1.1)}{localhost:35873} 2024-11-19T12:47:57,936 INFO [Time-limited test {}] server.Server(415): Started @171633ms 2024-11-19T12:47:57,937 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:47:58,177 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:47:58,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:58,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa052eb888d309266 with lease ID 0xa351f8c4ab2b9a54: from storage DS-64fd7608-9993-4752-89a9-a2adf44fe158 node DatanodeRegistration(127.0.0.1:35363, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=39921, infoSecurePort=0, ipcPort=43725, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:58,207 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xa052eb888d309266 with lease ID 0xa351f8c4ab2b9a54: from storage DS-ace0dfdb-4beb-4ccb-b677-a0f2c1497cff node DatanodeRegistration(127.0.0.1:35363, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=39921, infoSecurePort=0, ipcPort=43725, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:58,315 WARN [Thread-1352 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:47:58,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd557582b06edbe6b with lease ID 0xa351f8c4ab2b9a55: from storage DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7 node DatanodeRegistration(127.0.0.1:43231, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=43099, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-19T12:47:58,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd557582b06edbe6b with lease ID 0xa351f8c4ab2b9a55: from storage DS-e7ca1be6-deb1-4c84-95bf-5342aef250a3 node DatanodeRegistration(127.0.0.1:43231, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=43099, infoSecurePort=0, ipcPort=40591, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:47:58,954 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-19T12:47:58,956 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-19T12:47:58,957 ERROR [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:58,958 WARN [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:58,958 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C35045%2C1732020464477:(num 1732020465000) roll requested 2024-11-19T12:47:58,958 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:47:58,963 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 newFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:47:58,964 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:58,964 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:58,964 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:58,964 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:58,964 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:47:58,964 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:47:58,965 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:58,965 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:47:58,965 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:47:58,965 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39921:39921),(127.0.0.1/127.0.0.1:43099:43099)] 2024-11-19T12:47:58,965 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 is not closed yet, will try archiving it next time 2024-11-19T12:47:58,965 WARN [IPC Server handler 4 on default port 33145 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-19T12:47:58,966 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 after 1ms 2024-11-19T12:47:59,206 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:47:59,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:00,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:00,207 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:00,970 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-19T12:48:01,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:01,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:02,181 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T12:48:02,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:02,208 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:02,967 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 after 4002ms 2024-11-19T12:48:02,975 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:43231,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:02,976 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35363,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK], DatanodeInfoWithStorage[127.0.0.1:43231,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:43231,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]) is bad. 2024-11-19T12:48:02,976 WARN [PacketResponder: BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:43231] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:02,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:60068 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35363:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:60068 dst: /127.0.0.1:35363 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] 
at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:02,977 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:58764 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:43231:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58764 dst: /127.0.0.1:43231 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:03,013 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@40b8cc2f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:03,014 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@58930156{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:48:03,014 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:48:03,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59a2e487{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:48:03,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@26c3707{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:48:03,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:48:03,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:48:03,017 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:48:03,017 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 090351fd-5464-4140-9326-73ec0348ea96) service to localhost/127.0.0.1:33145 2024-11-19T12:48:03,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data1/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:03,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data2/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:03,018 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:48:03,025 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:03,028 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:48:03,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:48:03,029 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:48:03,029 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:48:03,029 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@612bb63e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:48:03,030 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f5c91a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:48:03,123 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@33333ea{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-41501-hadoop-hdfs-3_4_1-tests_jar-_-any-6348399168519462754/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:03,124 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48ed4c24{HTTP/1.1, (http/1.1)}{localhost:41501} 2024-11-19T12:48:03,124 INFO [Time-limited test {}] server.Server(415): Started @176821ms 2024-11-19T12:48:03,125 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:48:03,143 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:03,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-943787546_22 at /127.0.0.1:36004 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35363:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36004 dst: /127.0.0.1:35363 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:03,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@9a967db{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:03,145 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@e5554e2{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:48:03,145 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:48:03,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c7ab51{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:48:03,145 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3a3e1ac9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:48:03,146 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:48:03,146 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:48:03,146 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 7a844064-43d2-45eb-9393-7af3635df0eb) service to localhost/127.0.0.1:33145 2024-11-19T12:48:03,146 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:48:03,147 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data3/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:03,147 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data4/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:03,147 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:48:03,156 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:03,159 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:48:03,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:48:03,160 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:48:03,160 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:48:03,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1892689f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:48:03,160 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7694c7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:48:03,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:03,209 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:03,254 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@443bbaa{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/java.io.tmpdir/jetty-localhost-44567-hadoop-hdfs-3_4_1-tests_jar-_-any-8602610445999458445/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:03,254 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@102e4779{HTTP/1.1, (http/1.1)}{localhost:44567} 2024-11-19T12:48:03,254 INFO [Time-limited test {}] server.Server(415): Started @176952ms 2024-11-19T12:48:03,255 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:48:03,502 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:48:03,504 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ef2cb79aedd60de with lease ID 0xa351f8c4ab2b9a56: from storage DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7 node DatanodeRegistration(127.0.0.1:44583, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=46239, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:03,505 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5ef2cb79aedd60de with lease ID 0xa351f8c4ab2b9a56: from storage DS-e7ca1be6-deb1-4c84-95bf-5342aef250a3 node DatanodeRegistration(127.0.0.1:44583, datanodeUuid=090351fd-5464-4140-9326-73ec0348ea96, infoPort=46239, infoSecurePort=0, ipcPort=32931, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:03,627 WARN [Thread-1426 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-19T12:48:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1cf529415228be6 with lease ID 0xa351f8c4ab2b9a57: from storage DS-64fd7608-9993-4752-89a9-a2adf44fe158 node DatanodeRegistration(127.0.0.1:32877, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=34307, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:03,630 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xc1cf529415228be6 with lease ID 0xa351f8c4ab2b9a57: from storage DS-ace0dfdb-4beb-4ccb-b677-a0f2c1497cff node DatanodeRegistration(127.0.0.1:32877, datanodeUuid=7a844064-43d2-45eb-9393-7af3635df0eb, infoPort=34307, infoSecurePort=0, ipcPort=46391, storageInfo=lv=-57;cid=testClusterID;nsid=1637363755;c=1732020462662), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:04,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:04,210 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:04,271 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-19T12:48:04,273 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-19T12:48:04,275 ERROR [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35363,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:04,275 WARN [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35363,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:04,275 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C35045%2C1732020464477:(num 1732020478958) roll requested 2024-11-19T12:48:04,276 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:04,283 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 newFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:04,283 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:04,283 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:04,283 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:04,283 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:04,284 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:04,284 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:04,284 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35363,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:04,284 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35363,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:04,284 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:48:04,285 WARN [IPC Server handler 0 on default port 33145 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-19T12:48:04,286 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 after 0ms 2024-11-19T12:48:04,286 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46239:46239),(127.0.0.1/127.0.0.1:34307:34307)] 2024-11-19T12:48:04,286 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 is not closed yet, will try archiving it next time 2024-11-19T12:48:05,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:05,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:06,211 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:06,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:06,288 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:06,294 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 newFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:06,295 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:06,295 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:06,295 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:06,295 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:06,295 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:06,295 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:06,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741838_1019 (size=1264) 2024-11-19T12:48:06,298 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741838_1019 (size=1264) 2024-11-19T12:48:06,298 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 is not closed yet, will try archiving it next time 2024-11-19T12:48:06,303 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34307:34307),(127.0.0.1/127.0.0.1:46239:46239)] 2024-11-19T12:48:06,303 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): 
hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 is not closed yet, will try archiving it next time 2024-11-19T12:48:06,303 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:48:06,303 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:48:06,304 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 after 1ms 2024-11-19T12:48:06,304 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:48:06,315 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732020465973/Put/vlen=218/seqid=0] 2024-11-19T12:48:06,315 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732020475646/Put/vlen=1045/seqid=0] 2024-11-19T12:48:06,315 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020465000 2024-11-19T12:48:06,315 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:48:06,315 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:48:06,316 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 after 1ms 2024-11-19T12:48:06,316 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:48:06,321 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732020478957/Put/vlen=1045/seqid=0] 2024-11-19T12:48:06,321 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732020480972/Put/vlen=1045/seqid=0] 2024-11-19T12:48:06,321 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 2024-11-19T12:48:06,321 DEBUG [Time-limited test {}] wal.TestLogRolling(403): 
recovering lease for hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:06,321 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:06,322 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 after 1ms 2024-11-19T12:48:06,322 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020484275 2024-11-19T12:48:06,327 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732020484275/Put/vlen=1045/seqid=0] 2024-11-19T12:48:06,327 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:06,327 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:06,327 WARN [IPC Server handler 3 on default port 33145 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-19T12:48:06,328 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 after 1ms 2024-11-19T12:48:06,632 WARN [ResponseProcessor for block BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:06,632 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1957110501_22 at /127.0.0.1:35442 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:32877:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:35442 dst: /127.0.0.1:32877 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:32877 remote=/127.0.0.1:35442]. Total timeout mills is 60000, 59661 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:06,633 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 block BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:32877,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK], DatanodeInfoWithStorage[127.0.0.1:44583,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:32877,DS-64fd7608-9993-4752-89a9-a2adf44fe158,DISK]) is bad. 
2024-11-19T12:48:06,633 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1957110501_22 at /127.0.0.1:37614 [Receiving block BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:44583:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37614 dst: /127.0.0.1:44583 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:06,638 WARN [DataStreamer for file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 block BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at 
org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:06,639 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741839_1022 (size=85) 2024-11-19T12:48:06,642 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741839_1022 (size=85) 2024-11-19T12:48:07,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:07,212 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:07,505 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-19T12:48:08,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:08,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:08,287 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020478958 after 4001ms 2024-11-19T12:48:09,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:09,213 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:10,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:10,214 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:10,328 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 after 4001ms 2024-11-19T12:48:10,328 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:10,332 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:10,333 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 587eec45fdc62e4cea654210d1c3ce1a 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-19T12:48:10,333 ERROR [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:10,334 WARN [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:10,334 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C35045%2C1732020464477:(num 1732020486287) roll requested 2024-11-19T12:48:10,334 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.1732020490334 2024-11-19T12:48:10,342 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 newFile=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020490334 2024-11-19T12:48:10,342 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,342 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,342 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,342 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,342 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,343 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020490334 2024-11-19T12:48:10,343 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:10,343 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34307:34307),(127.0.0.1/127.0.0.1:46239:46239)] 2024-11-19T12:48:10,343 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 is not closed yet, will try archiving it next time 2024-11-19T12:48:10,343 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. 
org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-1869456203-172.17.0.2-1732020462662:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] 
at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor103.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:10,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:10,344 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 after 0ms 2024-11-19T12:48:10,345 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.1732020486287 to hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs/aba5a916dfea%2C35045%2C1732020464477.1732020486287 2024-11-19T12:48:10,359 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/.tmp/info/f6ec27d86d0c4a7a91a2f9f55f0bec3c is 1080, key is row1002/info:/1732020475646/Put/seqid=0 2024-11-19T12:48:10,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741841_1024 (size=9270) 2024-11-19T12:48:10,364 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741841_1024 (size=9270) 2024-11-19T12:48:10,365 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/.tmp/info/f6ec27d86d0c4a7a91a2f9f55f0bec3c 2024-11-19T12:48:10,373 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/.tmp/info/f6ec27d86d0c4a7a91a2f9f55f0bec3c as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/info/f6ec27d86d0c4a7a91a2f9f55f0bec3c 2024-11-19T12:48:10,379 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/info/f6ec27d86d0c4a7a91a2f9f55f0bec3c, entries=4, sequenceid=8, filesize=9.1 K 2024-11-19T12:48:10,380 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 587eec45fdc62e4cea654210d1c3ce1a in 48ms, sequenceid=8, compaction requested=false 2024-11-19T12:48:10,380 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 587eec45fdc62e4cea654210d1c3ce1a: 2024-11-19T12:48:10,380 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-19T12:48:10,381 ERROR 
[FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:10,381 WARN [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d-prefix:aba5a916dfea,35045,1732020464477.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:10,381 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C35045%2C1732020464477.meta:.meta(num 1732020465466) roll requested 2024-11-19T12:48:10,381 INFO [regionserver/aba5a916dfea:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C35045%2C1732020464477.meta.1732020490381.meta 2024-11-19T12:48:10,392 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,392 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,392 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,392 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,392 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,392 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020490381.meta 2024-11-19T12:48:10,392 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:10,393 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:10,393 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta 2024-11-19T12:48:10,393 WARN [IPC Server handler 1 on default port 33145 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta has not been closed. Lease recovery is in progress. 
RecoveryId = 1026 for block blk_1073741834_1015 2024-11-19T12:48:10,393 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta after 0ms 2024-11-19T12:48:10,394 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34307:34307),(127.0.0.1/127.0.0.1:46239:46239)] 2024-11-19T12:48:10,395 DEBUG [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta is not closed yet, will try archiving it next time 2024-11-19T12:48:10,413 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/info/bdd0443d64664d7182c7eb04cf67a3a3 is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a./info:regioninfo/1732020465977/Put/seqid=0 2024-11-19T12:48:10,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741843_1027 (size=7125) 2024-11-19T12:48:10,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741843_1027 (size=7125) 2024-11-19T12:48:10,422 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/info/bdd0443d64664d7182c7eb04cf67a3a3 2024-11-19T12:48:10,443 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/ns/b98c7058fdda4cbd8693aec5fd7faf19 is 43, key is default/ns:d/1732020465542/Put/seqid=0 2024-11-19T12:48:10,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741844_1028 (size=5153) 2024-11-19T12:48:10,449 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741844_1028 (size=5153) 2024-11-19T12:48:10,449 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/ns/b98c7058fdda4cbd8693aec5fd7faf19 2024-11-19T12:48:10,471 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/table/c7969b4ae5474e9789d2ac6f5af70509 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732020465988/Put/seqid=0 2024-11-19T12:48:10,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741845_1029 (size=5438) 2024-11-19T12:48:10,477 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741845_1029 (size=5438) 2024-11-19T12:48:10,477 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/table/c7969b4ae5474e9789d2ac6f5af70509 2024-11-19T12:48:10,487 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/info/bdd0443d64664d7182c7eb04cf67a3a3 as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/info/bdd0443d64664d7182c7eb04cf67a3a3 2024-11-19T12:48:10,495 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/info/bdd0443d64664d7182c7eb04cf67a3a3, entries=10, sequenceid=11, filesize=7.0 K 2024-11-19T12:48:10,497 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/ns/b98c7058fdda4cbd8693aec5fd7faf19 as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/ns/b98c7058fdda4cbd8693aec5fd7faf19 2024-11-19T12:48:10,506 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/ns/b98c7058fdda4cbd8693aec5fd7faf19, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:48:10,508 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/.tmp/table/c7969b4ae5474e9789d2ac6f5af70509 as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/table/c7969b4ae5474e9789d2ac6f5af70509 2024-11-19T12:48:10,517 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/table/c7969b4ae5474e9789d2ac6f5af70509, entries=2, sequenceid=11, filesize=5.3 K 2024-11-19T12:48:10,519 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 139ms, sequenceid=11, compaction requested=false 2024-11-19T12:48:10,519 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T12:48:10,527 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:48:10,527 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
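The flush sequence recorded above follows a two-step pattern: each store writes its memstore snapshot to an HFile under the region's `.tmp` directory, and only once the write succeeds is the file committed into the live store directory (the `Committing .../.tmp/info/... as .../info/...` lines). Below is a minimal, illustrative Java sketch of that write-to-temp-then-commit pattern using plain Hadoop `FileSystem` calls; the class name, paths, and the `flushAndCommit` helper are hypothetical, and this is not the actual HBase `HRegionFileSystem` code.

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative write-to-.tmp-then-commit pattern (not HBase's HRegionFileSystem implementation). */
public class TmpThenCommit {
  public static Path flushAndCommit(FileSystem fs, Path storeDir, String fileName, byte[] payload)
      throws IOException {
    Path tmpFile = new Path(new Path(storeDir, ".tmp"), fileName);
    Path finalFile = new Path(storeDir, fileName);

    // 1. Write the flushed data to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(payload);
    }
    // 2. Only after the write succeeds, move the file into the live store directory.
    if (!fs.rename(tmpFile, finalFile)) {
      throw new IOException("Failed to commit " + tmpFile + " as " + finalFile);
    }
    return finalFile;
  }

  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Hypothetical NameNode URI and store directory, mirroring the layout seen in the log.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33145"), conf);
    flushAndCommit(fs, new Path("/user/jenkins/example/data/hbase/meta/1588230740/info"),
        "example-hfile", new byte[] {1, 2, 3});
  }
}
```

The point of the pattern, as these records show, is that readers only ever see fully written files: a crash mid-flush leaves at most an orphan under `.tmp`, never a truncated HFile in the store directory.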
2024-11-19T12:48:10,527 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:48:10,527 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:10,528 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:10,528 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-19T12:48:10,528 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:48:10,528 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=59028225, stopped=false 2024-11-19T12:48:10,528 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,40913,1732020464340 2024-11-19T12:48:10,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:48:10,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:48:10,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:10,573 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:10,574 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:48:10,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:48:10,574 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:48:10,575 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
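The ZooKeeper events above (`NodeDeleted` on `/hbase/running`, followed by `Set watcher on znode that does not yet exist`) are how the master and region server learn that cluster shutdown has been requested. The snippet below is a small, self-contained sketch of that watch-for-deletion pattern using the plain ZooKeeper client API; the quorum string is copied from the log, but the class and the latch-based handling are illustrative, not HBase's `ZKWatcher` implementation.

```java
import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Illustrative "cluster running" znode watch (a sketch of the pattern, not HBase's ZKWatcher). */
public class RunningZNodeWatch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:52390";      // quorum string as it appears in the log
    String runningZNode = "/hbase/running"; // znode whose deletion signals shutdown
    CountDownLatch shutdown = new CountDownLatch(1);

    Watcher watcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeDeleted
          && runningZNode.equals(event.getPath())) {
        shutdown.countDown();
      }
    };

    ZooKeeper zk = new ZooKeeper(quorum, 30_000, watcher);
    try {
      // exists() with watch=true registers the default watcher even when the node is
      // currently absent, which matches the "Set watcher on znode that does not yet exist" lines.
      zk.exists(runningZNode, true);
      shutdown.await();
      System.out.println("Running znode deleted; initiating shutdown.");
    } finally {
      zk.close();
    }
  }
}
```

Standard ZooKeeper watches are one-shot, so real code re-registers the watch after every notification (as the repeated "Set watcher" lines suggest); the sketch only waits for the first deletion.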
2024-11-19T12:48:10,575 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:48:10,575 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:10,575 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,35045,1732020464477' ***** 2024-11-19T12:48:10,575 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:48:10,576 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(3091): Received CLOSE for 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,35045,1732020464477 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:48:10,576 INFO [RS:0;aba5a916dfea:35045 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:35045. 2024-11-19T12:48:10,577 DEBUG [RS:0;aba5a916dfea:35045 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 587eec45fdc62e4cea654210d1c3ce1a, disabling compactions & flushes 2024-11-19T12:48:10,577 DEBUG [RS:0;aba5a916dfea:35045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:10,577 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 
2024-11-19T12:48:10,577 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. after waiting 0 ms 2024-11-19T12:48:10,577 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:48:10,577 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:48:10,577 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:48:10,577 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T12:48:10,577 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1325): Online Regions={587eec45fdc62e4cea654210d1c3ce1a=TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:48:10,577 DEBUG [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 587eec45fdc62e4cea654210d1c3ce1a 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:48:10,577 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:48:10,577 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:48:10,588 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/default/TestLogRolling-testLogRollOnPipelineRestart/587eec45fdc62e4cea654210d1c3ce1a/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-19T12:48:10,588 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 
2024-11-19T12:48:10,589 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 587eec45fdc62e4cea654210d1c3ce1a: Waiting for close lock at 1732020490576Running coprocessor pre-close hooks at 1732020490576Disabling compacts and flushes for region at 1732020490576Disabling writes for close at 1732020490577 (+1 ms)Writing region close event to WAL at 1732020490578 (+1 ms)Running coprocessor post-close hooks at 1732020490588 (+10 ms)Closed at 1732020490588 2024-11-19T12:48:10,589 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732020465619.587eec45fdc62e4cea654210d1c3ce1a. 2024-11-19T12:48:10,592 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:48:10,593 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:48:10,593 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:48:10,593 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020490577Running coprocessor pre-close hooks at 1732020490577Disabling compacts and flushes for region at 1732020490577Disabling writes for close at 1732020490577Writing region close event to WAL at 1732020490588 (+11 ms)Running coprocessor post-close hooks at 1732020490593 (+5 ms)Closed at 1732020490593 2024-11-19T12:48:10,593 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:48:10,777 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,35045,1732020464477; all regions closed. 
2024-11-19T12:48:10,778 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,778 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,778 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,778 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,778 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:10,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741842_1025 (size=825) 2024-11-19T12:48:10,781 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741842_1025 (size=825) 2024-11-19T12:48:10,865 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:48:10,865 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:48:10,866 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:48:11,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:11,215 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:12,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:12,216 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:13,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:13,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:48:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:48:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-19T12:48:13,631 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
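The `Recover lease on dfs file` / `Failed to recover lease, attempt=0` records earlier, the repeated `Failed invocation ... isFileClosed` warnings above, and the `Recovered lease, attempt=1 ... after 4001ms` record a few lines further down all show the usual HDFS lease-recovery loop: ask the NameNode to recover the lease, poll whether the file is closed, and retry with a pause until it succeeds. The following is a minimal sketch of that loop against the public `DistributedFileSystem` API; the class name, retry count, and pause are hypothetical, and it is not the `RecoverLeaseFSUtils` source (which, as the stack traces show, calls `isFileClosed` reflectively and fails with "Filesystem closed" once the client has already been shut down).

```java
import java.io.IOException;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

/** Illustrative lease-recovery retry loop (a sketch of the pattern, not RecoverLeaseFSUtils itself). */
public class LeaseRecoverySketch {
  public static boolean recoverWithRetries(DistributedFileSystem dfs, Path walFile,
      int maxAttempts, long pauseMs) throws IOException, InterruptedException {
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      // recoverLease returns true once the NameNode has closed the file.
      if (dfs.recoverLease(walFile)) {
        return true;
      }
      // Otherwise recovery is still in progress; poll isFileClosed before retrying.
      // This is the call that fails with "Filesystem closed" in the warnings above
      // when the client filesystem has already been shut down.
      if (dfs.isFileClosed(walFile)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical NameNode and WAL path standing in for the ones in the log.
    try (FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:33145"), conf)) {
      if (fs instanceof DistributedFileSystem) {
        recoverWithRetries((DistributedFileSystem) fs,
            new Path("/user/jenkins/example.wal"), 5, 1000L);
      }
    }
  }
}
```

A retry ceiling or deadline matters here: when the underlying client has already been closed (the "Filesystem closed" cause in the warnings), every poll keeps failing, so the loop should give up rather than spin indefinitely.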
2024-11-19T12:48:14,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:14,217 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:14,323 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:48:14,394 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta after 4001ms 2024-11-19T12:48:14,395 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/WALs/aba5a916dfea,35045,1732020464477/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta to hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs/aba5a916dfea%2C35045%2C1732020464477.meta.1732020465466.meta 2024-11-19T12:48:14,397 DEBUG [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs 2024-11-19T12:48:14,398 INFO [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C35045%2C1732020464477.meta:.meta(num 1732020490381) 2024-11-19T12:48:14,398 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,398 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,398 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,398 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,398 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741840_1023 (size=1162) 2024-11-19T12:48:14,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741840_1023 (size=1162) 2024-11-19T12:48:14,406 DEBUG [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C35045%2C1732020464477:(num 1732020490334) 2024-11-19T12:48:14,407 DEBUG 
[RS:0;aba5a916dfea:35045 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:48:14,407 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:48:14,407 INFO [RS:0;aba5a916dfea:35045 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35045 2024-11-19T12:48:14,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,35045,1732020464477 2024-11-19T12:48:14,460 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:48:14,460 INFO [RS:0;aba5a916dfea:35045 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:48:14,468 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,35045,1732020464477] 2024-11-19T12:48:14,477 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,35045,1732020464477 already deleted, retry=false 2024-11-19T12:48:14,477 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,35045,1732020464477 expired; onlineServers=0 2024-11-19T12:48:14,477 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,40913,1732020464340' ***** 2024-11-19T12:48:14,477 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:48:14,477 INFO [M:0;aba5a916dfea:40913 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:48:14,477 INFO [M:0;aba5a916dfea:40913 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:48:14,477 DEBUG [M:0;aba5a916dfea:40913 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:48:14,477 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:48:14,477 DEBUG [M:0;aba5a916dfea:40913 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:48:14,477 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020464778 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020464778,5,FailOnTimeoutGroup] 2024-11-19T12:48:14,477 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020464777 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020464777,5,FailOnTimeoutGroup] 2024-11-19T12:48:14,477 INFO [M:0;aba5a916dfea:40913 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:48:14,477 INFO [M:0;aba5a916dfea:40913 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:48:14,477 DEBUG [M:0;aba5a916dfea:40913 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:48:14,477 INFO [M:0;aba5a916dfea:40913 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:48:14,478 INFO [M:0;aba5a916dfea:40913 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:48:14,478 INFO [M:0;aba5a916dfea:40913 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:48:14,478 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:48:14,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:48:14,485 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:14,485 DEBUG [M:0;aba5a916dfea:40913 {}] zookeeper.ZKUtil(347): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:48:14,485 WARN [M:0;aba5a916dfea:40913 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:48:14,486 INFO [M:0;aba5a916dfea:40913 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/.lastflushedseqids 2024-11-19T12:48:14,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741846_1030 (size=130) 2024-11-19T12:48:14,495 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741846_1030 (size=130) 2024-11-19T12:48:14,495 INFO [M:0;aba5a916dfea:40913 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:48:14,495 INFO [M:0;aba5a916dfea:40913 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:48:14,495 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:48:14,495 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:14,495 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:14,495 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:48:14,495 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:14,496 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-19T12:48:14,496 ERROR [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData-prefix:aba5a916dfea,40913,1732020464340 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:14,496 WARN [FSHLog-0-hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData-prefix:aba5a916dfea,40913,1732020464340 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-19T12:48:14,496 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog aba5a916dfea%2C40913%2C1732020464340:(num 1732020464596) roll requested 2024-11-19T12:48:14,496 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40913%2C1732020464340.1732020494496 2024-11-19T12:48:14,501 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,501 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,501 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,501 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,501 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:14,502 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020494496 2024-11-19T12:48:14,502 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:14,502 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:37255,DS-50ffdbe8-8e6c-480a-a4c7-2baaee724fc7,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-19T12:48:14,502 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 2024-11-19T12:48:14,503 WARN [IPC Server handler 0 on default port 33145 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 has not been closed. 
Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-19T12:48:14,503 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 after 1ms 2024-11-19T12:48:14,515 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34307:34307),(127.0.0.1/127.0.0.1:46239:46239)] 2024-11-19T12:48:14,515 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 is not closed yet, will try archiving it next time 2024-11-19T12:48:14,530 DEBUG [M:0;aba5a916dfea:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/026774db84cb47a39b97d47de8ad3ffe is 82, key is hbase:meta,,1/info:regioninfo/1732020465496/Put/seqid=0 2024-11-19T12:48:14,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741848_1033 (size=5672) 2024-11-19T12:48:14,544 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741848_1033 (size=5672) 2024-11-19T12:48:14,545 INFO [M:0;aba5a916dfea:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/026774db84cb47a39b97d47de8ad3ffe 2024-11-19T12:48:14,568 DEBUG [M:0;aba5a916dfea:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1db04643a9b841a5a0c684d033ee13e2 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020465993/Put/seqid=0 2024-11-19T12:48:14,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:48:14,578 INFO [RS:0;aba5a916dfea:35045 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:48:14,579 INFO [RS:0;aba5a916dfea:35045 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,35045,1732020464477; zookeeper connection closed. 
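
Editor's note: the flush entries above write the new files under the store's `.tmp` directory, and the later "Committing ... as ..." entries move them into the visible store directory. A minimal sketch of that write-to-temp-then-commit pattern on HDFS, using plain `FileSystem` calls; the paths and payload are placeholders, not the test's actual files.

```java
// Sketch of the flush commit pattern visible in the log: write under .tmp first,
// then make the file visible with a single rename. Paths are placeholders.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TmpThenCommit {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        Path tmpFile = new Path("/data/store/.tmp/flush-0001");
        Path committed = new Path("/data/store/info/flush-0001");

        // 1. Write the flushed data where readers never look.
        try (FSDataOutputStream out = fs.create(tmpFile, true)) {
            out.writeBytes("flushed cells would go here");
        }

        // 2. Commit: one rename moves the finished file into the store directory.
        if (!fs.rename(tmpFile, committed)) {
            throw new java.io.IOException("commit rename failed for " + tmpFile);
        }
    }
}
```
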
2024-11-19T12:48:14,579 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35045-0x101546c7d8a0001, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:48:14,581 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@543620ca {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@543620ca 2024-11-19T12:48:14,582 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:48:14,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741849_1034 (size=6119) 2024-11-19T12:48:14,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741849_1034 (size=6119) 2024-11-19T12:48:14,987 INFO [M:0;aba5a916dfea:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1db04643a9b841a5a0c684d033ee13e2 2024-11-19T12:48:15,009 DEBUG [M:0;aba5a916dfea:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d422cfa074d540a58b6d37053fdf0baf is 69, key is aba5a916dfea,35045,1732020464477/rs:state/1732020464841/Put/seqid=0 2024-11-19T12:48:15,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741850_1035 (size=5156) 2024-11-19T12:48:15,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741850_1035 (size=5156) 2024-11-19T12:48:15,015 INFO [M:0;aba5a916dfea:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d422cfa074d540a58b6d37053fdf0baf 2024-11-19T12:48:15,033 DEBUG [M:0;aba5a916dfea:40913 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/527144269f504b42aa2fd5af7bd6a760 is 52, key is load_balancer_on/state:d/1732020465614/Put/seqid=0 2024-11-19T12:48:15,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741851_1036 (size=5056) 2024-11-19T12:48:15,038 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741851_1036 (size=5056) 2024-11-19T12:48:15,038 INFO [M:0;aba5a916dfea:40913 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/527144269f504b42aa2fd5af7bd6a760 2024-11-19T12:48:15,044 DEBUG [M:0;aba5a916dfea:40913 {}] 
regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/026774db84cb47a39b97d47de8ad3ffe as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/026774db84cb47a39b97d47de8ad3ffe 2024-11-19T12:48:15,049 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/026774db84cb47a39b97d47de8ad3ffe, entries=8, sequenceid=56, filesize=5.5 K 2024-11-19T12:48:15,050 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1db04643a9b841a5a0c684d033ee13e2 as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1db04643a9b841a5a0c684d033ee13e2 2024-11-19T12:48:15,056 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1db04643a9b841a5a0c684d033ee13e2, entries=6, sequenceid=56, filesize=6.0 K 2024-11-19T12:48:15,057 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/d422cfa074d540a58b6d37053fdf0baf as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d422cfa074d540a58b6d37053fdf0baf 2024-11-19T12:48:15,062 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/d422cfa074d540a58b6d37053fdf0baf, entries=1, sequenceid=56, filesize=5.0 K 2024-11-19T12:48:15,064 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/527144269f504b42aa2fd5af7bd6a760 as hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/527144269f504b42aa2fd5af7bd6a760 2024-11-19T12:48:15,069 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/527144269f504b42aa2fd5af7bd6a760, entries=1, sequenceid=56, filesize=4.9 K 2024-11-19T12:48:15,070 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 575ms, sequenceid=56, compaction requested=false 2024-11-19T12:48:15,071 INFO [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1973): Closed 
master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:15,072 DEBUG [M:0;aba5a916dfea:40913 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020494495Disabling compacts and flushes for region at 1732020494495Disabling writes for close at 1732020494495Obtaining lock to block concurrent updates at 1732020494496 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020494496Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732020494496Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020494515 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020494515Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020494529 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020494530 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020494550 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020494567 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020494567Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020494995 (+428 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020495009 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020495009Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020495019 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020495033 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020495033Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6b0fb06d: reopening flushed file at 1732020495043 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5d442ba: reopening flushed file at 1732020495050 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1dc06bad: reopening flushed file at 1732020495056 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ab61a4b: reopening flushed file at 1732020495063 (+7 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 575ms, sequenceid=56, compaction requested=false at 1732020495070 (+7 ms)Writing region close event to WAL at 1732020495071 (+1 ms)Closed at 1732020495071 2024-11-19T12:48:15,072 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:15,072 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:15,072 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:15,072 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:15,072 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:15,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32877 is added to blk_1073741847_1031 (size=757) 2024-11-19T12:48:15,074 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:44583 is added to blk_1073741847_1031 (size=757) 2024-11-19T12:48:15,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:15,218 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:15,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,589 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,609 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,610 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,614 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,617 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:15,622 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,125 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:48:16,126 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,127 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,147 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,148 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,149 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,153 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,156 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:16,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:16,219 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:16,631 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
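
Editor's note: the repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings come from polling `isFileClosed()` after the underlying DFS client has already been shut down, so each retry can only fail the same way. A small hedged sketch of guarding that check during shutdown — this is not the RecoverLeaseFSUtils implementation, just an illustration of the hazard; matching on the exception message is an assumption.

```java
// Illustration of the shutdown hazard behind the warnings above: once the DFSClient
// is closed, isFileClosed() always throws "Filesystem closed", so retrying is pointless.
import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class SafeClosedCheck {
    /**
     * @return true if the file is known closed, false if it is still open or if the
     *         client itself has been shut down (in which case callers should give up).
     */
    static boolean isClosedOrGiveUp(DistributedFileSystem dfs, Path file) throws IOException {
        try {
            return dfs.isFileClosed(file);
        } catch (IOException e) {
            if ("Filesystem closed".equals(e.getMessage())) {
                return false; // the client is gone; abandon lease recovery instead of retrying
            }
            throw e;
        }
    }
}
```
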
2024-11-19T12:48:17,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:17,220 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:18,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:18,221 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:18,504 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 after 4002ms 2024-11-19T12:48:18,505 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/WALs/aba5a916dfea,40913,1732020464340/aba5a916dfea%2C40913%2C1732020464340.1732020464596 to hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/oldWALs/aba5a916dfea%2C40913%2C1732020464340.1732020464596 2024-11-19T12:48:18,508 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/MasterData/oldWALs/aba5a916dfea%2C40913%2C1732020464340.1732020464596 to hdfs://localhost:33145/user/jenkins/test-data/424da97f-c15e-4d57-c819-570cea89603d/oldWALs/aba5a916dfea%2C40913%2C1732020464340.1732020464596$masterlocalwal$ 2024-11-19T12:48:18,509 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
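
Editor's note: the entries just above show the happy path completing — the lease on the old master WAL is recovered on a later attempt (~4 s after the first try) and the file is then archived. A simplified Java sketch of that recover-then-archive sequence using the public `DistributedFileSystem` API; the retry count, sleep interval, and paths are invented for the example.

```java
// Simplified recover-then-archive loop: poll recoverLease() until HDFS reports the
// old WAL closed, then move it aside. Retry/backoff values are made up.
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

final class RecoverAndArchive {
    static void recoverAndArchive(DistributedFileSystem dfs, Path oldWal, Path archiveDir)
            throws Exception {
        boolean recovered = false;
        for (int attempt = 0; attempt < 10 && !recovered; attempt++) {
            recovered = dfs.recoverLease(oldWal);   // true once the file is fully closed
            if (!recovered) {
                Thread.sleep(1000L);                // back off, as the log's ~4 s gap suggests
            }
        }
        if (recovered && !dfs.rename(oldWal, new Path(archiveDir, oldWal.getName()))) {
            throw new java.io.IOException("failed to archive " + oldWal);
        }
    }
}
```
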
2024-11-19T12:48:18,509 INFO [M:0;aba5a916dfea:40913 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:48:18,509 INFO [M:0;aba5a916dfea:40913 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40913 2024-11-19T12:48:18,509 INFO [M:0;aba5a916dfea:40913 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:48:18,659 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:48:18,659 INFO [M:0;aba5a916dfea:40913 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:48:18,660 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40913-0x101546c7d8a0000, quorum=127.0.0.1:52390, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:48:18,662 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@443bbaa{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:18,662 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@102e4779{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:48:18,663 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:48:18,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7694c7c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:48:18,663 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1892689f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:48:18,664 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
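
Editor's note: the ZKWatcher lines above report connection-state events ("type=None, state=Closed, path=null") as the master tears down its session, as opposed to the earlier node events such as NodeDeleted. A bare-bones Watcher showing that distinction — not HBase's ZKWatcher, just the plain ZooKeeper client API.

```java
// Minimal Watcher distinguishing connection-state events (type None, null path)
// from real znode events, as reflected in the ZKWatcher log entries above.
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

public class ShutdownAwareWatcher implements Watcher {
    @Override
    public void process(WatchedEvent event) {
        if (event.getType() == Event.EventType.None) {
            // Connection-state change: SyncConnected / Disconnected / Closed; path is null.
            System.out.println("ZK connection state: " + event.getState());
        } else {
            // Actual znode event: NodeDeleted, NodeChildrenChanged, ... with a path.
            System.out.println("ZK node event " + event.getType() + " on " + event.getPath());
        }
    }
}
```
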
2024-11-19T12:48:18,664 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:48:18,665 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:48:18,665 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 7a844064-43d2-45eb-9393-7af3635df0eb) service to localhost/127.0.0.1:33145 2024-11-19T12:48:18,665 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data3/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:18,666 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data4/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:18,666 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:48:18,672 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@33333ea{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:18,673 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48ed4c24{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:48:18,673 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:48:18,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f5c91a5{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:48:18,673 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@612bb63e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:48:18,674 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:48:18,674 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
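
Editor's note: the "refreshUsed" warnings above are the expected way such background refresh loops terminate — the interrupt delivered at datanode shutdown is the stop signal, not a fault. A generic Java sketch of a refresh loop with that behaviour; the interval and the sampled value are arbitrary placeholders.

```java
// Background refresher that treats interruption as the shutdown signal, matching the
// "sleep interrupted" warnings above. Interval and measurement are placeholders.
final class DiskUsageRefresher implements Runnable {
    private volatile long lastSample;

    @Override
    public void run() {
        while (!Thread.currentThread().isInterrupted()) {
            lastSample = sample();
            try {
                Thread.sleep(10_000L);              // refresh interval, arbitrary here
            } catch (InterruptedException shutdown) {
                Thread.currentThread().interrupt(); // restore the flag and exit quietly
                return;
            }
        }
    }

    private long sample() {
        return new java.io.File("/").getUsableSpace(); // placeholder measurement
    }
}
```
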
2024-11-19T12:48:18,674 WARN [BP-1869456203-172.17.0.2-1732020462662 heartbeating to localhost/127.0.0.1:33145 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1869456203-172.17.0.2-1732020462662 (Datanode Uuid 090351fd-5464-4140-9326-73ec0348ea96) service to localhost/127.0.0.1:33145 2024-11-19T12:48:18,674 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:48:18,675 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data1/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:18,675 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/cluster_654a768f-fac2-e84b-c351-6741ed53cee2/data/data2/current/BP-1869456203-172.17.0.2-1732020462662 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:48:18,675 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:48:18,681 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43123a6e{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:48:18,681 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@633966fa{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:48:18,681 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:48:18,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5260e8b3{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:48:18,682 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@675d0ec1{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir/,STOPPED} 2024-11-19T12:48:18,688 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:48:18,712 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:48:18,720 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33145 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33145 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:33145 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:33145 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:33145 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:33145 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? 
-, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=186 (was 186), ProcessCount=11 (was 11), AvailableMemoryMB=5844 (was 6346) 2024-11-19T12:48:18,727 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=186, ProcessCount=11, AvailableMemoryMB=5843 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.log.dir so I do NOT create it in target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/46d53ec8-f0a3-3d1b-84d7-99fe09d13791/hadoop.tmp.dir so I do NOT create it in target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993, deleteOnExit=true 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/test.cache.data in system properties and HBase conf 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:48:18,728 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:48:18,729 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a 
DistributedFileSystem. Skipping on block location reordering 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:48:18,729 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:48:18,730 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:48:18,743 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:48:19,029 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:19,034 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:48:19,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:48:19,051 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:48:19,051 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:48:19,052 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:19,052 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@47aef258{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:48:19,053 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@610a5e11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:48:19,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@287bf7ab{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/java.io.tmpdir/jetty-localhost-46143-hadoop-hdfs-3_4_1-tests_jar-_-any-15605979403990806825/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:48:19,168 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@dfb0819{HTTP/1.1, (http/1.1)}{localhost:46143} 2024-11-19T12:48:19,168 INFO [Time-limited test {}] server.Server(415): Started @192866ms 2024-11-19T12:48:19,181 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:48:19,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:19,222 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:19,402 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:19,405 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:48:19,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:48:19,406 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:48:19,406 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:48:19,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4369974{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:48:19,406 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7a6a3c52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:48:19,511 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a4ab4a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/java.io.tmpdir/jetty-localhost-40877-hadoop-hdfs-3_4_1-tests_jar-_-any-16330274991831828707/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:19,512 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2fb77c6e{HTTP/1.1, (http/1.1)}{localhost:40877} 2024-11-19T12:48:19,512 INFO [Time-limited test {}] server.Server(415): Started @193209ms 2024-11-19T12:48:19,513 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:48:19,543 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:48:19,546 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:48:19,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:48:19,547 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:48:19,547 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:48:19,547 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f5820{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:48:19,548 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5332e2ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:48:19,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@22de9ffd{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/java.io.tmpdir/jetty-localhost-37261-hadoop-hdfs-3_4_1-tests_jar-_-any-10546111377987672244/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:48:19,663 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@11be9bab{HTTP/1.1, (http/1.1)}{localhost:37261} 2024-11-19T12:48:19,663 INFO [Time-limited test {}] server.Server(415): Started @193361ms 2024-11-19T12:48:19,664 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:48:20,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:20,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:20,295 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data1/current/BP-881519828-172.17.0.2-1732020498751/current, will proceed with Du for space computation calculation, 2024-11-19T12:48:20,295 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data2/current/BP-881519828-172.17.0.2-1732020498751/current, will proceed with Du for space computation calculation, 2024-11-19T12:48:20,314 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:48:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14e08d7c13127205 with lease ID 0xd23dcbf5473c601c: Processing first storage report for DS-41f1bdfc-be24-433c-91b6-2361b1e09cd3 from datanode DatanodeRegistration(127.0.0.1:40915, datanodeUuid=7f60838b-cb9c-49ce-9d95-5c41fcb53d6c, infoPort=35713, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751) 2024-11-19T12:48:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14e08d7c13127205 with lease ID 0xd23dcbf5473c601c: from storage DS-41f1bdfc-be24-433c-91b6-2361b1e09cd3 node DatanodeRegistration(127.0.0.1:40915, datanodeUuid=7f60838b-cb9c-49ce-9d95-5c41fcb53d6c, infoPort=35713, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x14e08d7c13127205 with lease ID 0xd23dcbf5473c601c: Processing first storage report for DS-7d355d57-5d4a-46d8-a3fb-b80c0de7d81a from datanode DatanodeRegistration(127.0.0.1:40915, datanodeUuid=7f60838b-cb9c-49ce-9d95-5c41fcb53d6c, infoPort=35713, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751) 2024-11-19T12:48:20,317 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x14e08d7c13127205 with lease ID 0xd23dcbf5473c601c: from storage DS-7d355d57-5d4a-46d8-a3fb-b80c0de7d81a node DatanodeRegistration(127.0.0.1:40915, datanodeUuid=7f60838b-cb9c-49ce-9d95-5c41fcb53d6c, infoPort=35713, infoSecurePort=0, ipcPort=39829, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:20,430 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data3/current/BP-881519828-172.17.0.2-1732020498751/current, will proceed with Du for space computation calculation, 2024-11-19T12:48:20,430 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data4/current/BP-881519828-172.17.0.2-1732020498751/current, will proceed with Du for space computation calculation, 2024-11-19T12:48:20,450 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:48:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa8300155b167 with lease ID 0xd23dcbf5473c601d: Processing first storage report for DS-cb4eaa9a-9e71-4753-b3db-4718a70ab02f from datanode DatanodeRegistration(127.0.0.1:39143, datanodeUuid=78a264eb-ae5a-4f93-a93b-43a9b49ad14c, infoPort=44149, infoSecurePort=0, ipcPort=41749, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751) 2024-11-19T12:48:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa8300155b167 with lease ID 0xd23dcbf5473c601d: from storage DS-cb4eaa9a-9e71-4753-b3db-4718a70ab02f node DatanodeRegistration(127.0.0.1:39143, datanodeUuid=78a264eb-ae5a-4f93-a93b-43a9b49ad14c, infoPort=44149, infoSecurePort=0, ipcPort=41749, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2fa8300155b167 with lease ID 0xd23dcbf5473c601d: Processing first storage report for DS-bd0ad86e-0afd-4151-ac73-45362059444a from datanode DatanodeRegistration(127.0.0.1:39143, datanodeUuid=78a264eb-ae5a-4f93-a93b-43a9b49ad14c, infoPort=44149, infoSecurePort=0, ipcPort=41749, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751) 2024-11-19T12:48:20,452 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2fa8300155b167 with lease ID 0xd23dcbf5473c601d: from storage DS-bd0ad86e-0afd-4151-ac73-45362059444a node DatanodeRegistration(127.0.0.1:39143, datanodeUuid=78a264eb-ae5a-4f93-a93b-43a9b49ad14c, infoPort=44149, infoSecurePort=0, ipcPort=41749, storageInfo=lv=-57;cid=testClusterID;nsid=763340579;c=1732020498751), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:48:20,497 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b 2024-11-19T12:48:20,529 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/zookeeper_0, clientPort=63076, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, 
clientPortListenBacklog=-1, serverId=0 2024-11-19T12:48:20,531 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=63076 2024-11-19T12:48:20,531 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,533 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:48:20,542 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:48:20,543 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0 with version=8 2024-11-19T12:48:20,543 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:48:20,545 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:48:20,545 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:48:20,546 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:48:20,547 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40617 2024-11-19T12:48:20,548 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:40617 connecting to ZooKeeper ensemble=127.0.0.1:63076 2024-11-19T12:48:20,603 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:406170x0, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 
2024-11-19T12:48:20,604 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:40617-0x101546d0af60000 connected 2024-11-19T12:48:20,664 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,666 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,670 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:48:20,671 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0, hbase.cluster.distributed=false 2024-11-19T12:48:20,673 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:48:20,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40617 2024-11-19T12:48:20,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40617 2024-11-19T12:48:20,674 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40617 2024-11-19T12:48:20,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40617 2024-11-19T12:48:20,675 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40617 2024-11-19T12:48:20,692 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:48:20,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,692 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:48:20,692 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:48:20,693 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:48:20,693 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:48:20,693 INFO [Time-limited test {}] 
ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:48:20,695 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43139 2024-11-19T12:48:20,696 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43139 connecting to ZooKeeper ensemble=127.0.0.1:63076 2024-11-19T12:48:20,697 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,699 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,713 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431390x0, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:48:20,713 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:431390x0, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:48:20,713 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:43139-0x101546d0af60001 connected 2024-11-19T12:48:20,713 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:48:20,714 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:48:20,714 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:48:20,715 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:48:20,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43139 2024-11-19T12:48:20,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43139 2024-11-19T12:48:20,716 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43139 2024-11-19T12:48:20,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43139 2024-11-19T12:48:20,717 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43139 2024-11-19T12:48:20,731 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:40617 2024-11-19T12:48:20,731 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-19T12:48:20,739 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:48:20,739 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:48:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,747 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,748 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:48:20,748 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,40617,1732020500545 from backup master directory 2024-11-19T12:48:20,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:48:20,755 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:48:20,755 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:48:20,755 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,760 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/hbase.id] with ID: 957e2a71-d987-4072-9c6e-f2fd67e59ac0 2024-11-19T12:48:20,760 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/.tmp/hbase.id 2024-11-19T12:48:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:48:20,767 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:48:20,768 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/.tmp/hbase.id]:[hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/hbase.id] 2024-11-19T12:48:20,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:20,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:48:20,784 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
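The hbase.id bootstrap above follows a write-to-temp-then-rename pattern: the cluster ID is first written under .tmp and then moved to its final location, so a reader never observes a partially written file. A minimal sketch of that pattern with the plain Hadoop FileSystem API (class name and paths are illustrative, not HBase's FSUtils):

```java
import java.nio.charset.StandardCharsets;
import java.util.UUID;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdFileSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:36925"); // namenode address from the log
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/example-root"); // hypothetical root dir
    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path target = new Path(rootDir, "hbase.id");

    // Write the ID somewhere nobody reads from yet.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // rename() is atomic on HDFS, so the final file appears fully written or not at all.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename failed: " + tmp + " -> " + target);
    }
  }
}
```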
2024-11-19T12:48:20,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,789 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:48:20,801 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:48:20,803 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:48:20,803 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:48:20,804 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:48:20,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:48:20,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:48:20,816 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store 2024-11-19T12:48:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:48:20,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:48:20,824 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:48:20,824 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
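The master:store descriptor above enumerates per-family attributes (VERSIONS, IN_MEMORY, BLOCKSIZE, DATA_BLOCK_ENCODING, BLOOMFILTER, and so on). As a hedged illustration only, a family with the same attributes as the logged 'info' family can be expressed through the public client API as below; the table name 'demo:store' is made up, and this is not how MasterRegion builds its descriptor internally:

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
  public static void main(String[] args) {
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)                                    // VERSIONS => '3'
        .setInMemory(true)                                    // IN_MEMORY => 'true'
        .setBlocksize(8 * 1024)                               // BLOCKSIZE => 8 KB
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1) // DATA_BLOCK_ENCODING
        .setBloomFilterType(BloomType.ROWCOL)                 // BLOOMFILTER => 'ROWCOL'
        .build();

    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo", "store"))       // hypothetical table name
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}
```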
2024-11-19T12:48:20,824 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020500824Disabling compacts and flushes for region at 1732020500824Disabling writes for close at 1732020500824Writing region close event to WAL at 1732020500824Closed at 1732020500824 2024-11-19T12:48:20,825 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/.initializing 2024-11-19T12:48:20,825 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/WALs/aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,827 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40617%2C1732020500545, suffix=, logDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/WALs/aba5a916dfea,40617,1732020500545, archiveDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/oldWALs, maxLogs=10 2024-11-19T12:48:20,828 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40617%2C1732020500545.1732020500827 2024-11-19T12:48:20,832 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/WALs/aba5a916dfea,40617,1732020500545/aba5a916dfea%2C40617%2C1732020500545.1732020500827 2024-11-19T12:48:20,833 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35713:35713),(127.0.0.1/127.0.0.1:44149:44149)] 2024-11-19T12:48:20,834 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:48:20,834 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:20,834 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,834 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,847 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,848 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:48:20,848 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:20,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:20,849 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,850 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:48:20,850 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:20,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:48:20,851 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,852 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:48:20,852 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:20,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:48:20,853 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:48:20,854 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:20,854 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:48:20,855 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,855 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,856 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,857 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,857 DEBUG [master/aba5a916dfea:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,858 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:48:20,860 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:48:20,862 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:48:20,862 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=756895, jitterRate=-0.0375584214925766}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:48:20,863 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020500834Initializing all the Stores at 1732020500835 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020500835Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020500846 (+11 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020500846Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020500846Cleaning up temporary data from old regions at 1732020500857 (+11 ms)Region opened successfully at 1732020500863 (+6 ms) 2024-11-19T12:48:20,863 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:48:20,867 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@44ec175f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:48:20,868 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:48:20,868 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:48:20,868 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:48:20,869 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:48:20,869 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:48:20,869 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:48:20,870 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:48:20,872 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:48:20,873 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:48:20,896 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:48:20,897 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:48:20,897 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:48:20,905 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:48:20,906 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:48:20,907 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:48:20,913 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:48:20,914 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:48:20,922 DEBUG 
[master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:48:20,924 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:48:20,930 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:48:20,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:48:20,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:48:20,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,939 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,40617,1732020500545, sessionid=0x101546d0af60000, setting cluster-up flag (Was=false) 2024-11-19T12:48:20,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,955 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,980 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:48:20,981 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40617,1732020500545 2024-11-19T12:48:20,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:20,997 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:21,022 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:48:21,023 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,40617,1732020500545 2024-11-19T12:48:21,025 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:48:21,026 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:48:21,027 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:48:21,027 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:48:21,027 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,40617,1732020500545 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:48:21,029 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, 
maxPoolSize=1 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020531030 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:48:21,030 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:48:21,031 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,031 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:48:21,031 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:48:21,031 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:48:21,031 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:48:21,031 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:48:21,032 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020501032,5,FailOnTimeoutGroup] 2024-11-19T12:48:21,032 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020501032,5,FailOnTimeoutGroup] 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,032 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,032 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,033 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:48:21,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:48:21,039 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:48:21,040 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:48:21,041 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0 2024-11-19T12:48:21,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:48:21,046 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:48:21,047 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:21,048 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:48:21,049 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:48:21,050 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,050 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:48:21,051 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:48:21,051 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,052 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:48:21,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:48:21,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,053 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:48:21,055 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to 
compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:48:21,055 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,055 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,055 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:48:21,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740 2024-11-19T12:48:21,056 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740 2024-11-19T12:48:21,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:48:21,058 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:48:21,058 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
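Both FlushLargeStoresPolicy messages (the 32.0 M reported for master:store earlier and the 16.0 M for hbase:meta just above) come from the same fallback rule: with no hbase.hregion.percolumnfamilyflush.size.lower.bound configured, the per-family lower bound is the region's memstore flush size divided by its number of column families. A quick check of that arithmetic; the 64 MiB flush size for the meta region is inferred from the logged result, not stated in the log:

```java
public class FlushLowerBoundCheck {
  public static void main(String[] args) {
    // master:store: flushSize=134217728 and four families (info, proc, rs, state), as logged.
    System.out.println(134_217_728L / 4); // 33554432 == 32 MiB, the logged flushSizeLowerBound

    // hbase:meta: the logged 16777216 (16 MiB) across its four families (info, ns,
    // rep_barrier, table) implies a 64 MiB memstore flush size for that region.
    System.out.println(16_777_216L * 4);  // 67108864 == 64 MiB (inferred)
  }
}
```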
2024-11-19T12:48:21,060 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:48:21,062 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:48:21,063 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696724, jitterRate=-0.11407008767127991}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020501047Initializing all the Stores at 1732020501048 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501048Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501048Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020501048Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501048Cleaning up temporary data from old regions at 1732020501058 (+10 ms)Region opened successfully at 1732020501064 (+6 ms) 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:48:21,064 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:48:21,064 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:48:21,064 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020501064Disabling compacts and flushes for region at 1732020501064Disabling writes for close at 1732020501064Writing region 
close event to WAL at 1732020501064Closed at 1732020501064 2024-11-19T12:48:21,066 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:48:21,066 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:48:21,066 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:48:21,068 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:48:21,069 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:48:21,119 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(746): ClusterId : 957e2a71-d987-4072-9c6e-f2fd67e59ac0 2024-11-19T12:48:21,119 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:48:21,131 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:48:21,131 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:48:21,139 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:48:21,140 DEBUG [RS:0;aba5a916dfea:43139 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@25a87370, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:48:21,155 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:43139 2024-11-19T12:48:21,155 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:48:21,155 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:48:21,155 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(832): About to register with Master. 
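The two jittered split sizes logged for these regions (desiredMaxFileSize=756895 with jitterRate=-0.0375584214925766 for master:store earlier, and 696724 with jitterRate=-0.11407008767127991 for hbase:meta above) are both consistent with a single base max file size of 786432 bytes (768 KB). That base is my back-calculation rather than something the log states, and the sketch assumes the policy applies desired = max + (long)(max * jitterRate):

```java
public class SplitJitterCheck {
  // Assumed form of ConstantSizeRegionSplitPolicy's jitter application.
  static long desired(long maxFileSize, double jitterRate) {
    return maxFileSize + (long) (maxFileSize * jitterRate);
  }

  public static void main(String[] args) {
    long base = 786_432L; // inferred configured max file size for this test
    System.out.println(desired(base, -0.0375584214925766));  // 756895 (master:store, as logged)
    System.out.println(desired(base, -0.11407008767127991)); // 696724 (hbase:meta, as logged)
  }
}
```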
2024-11-19T12:48:21,156 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,40617,1732020500545 with port=43139, startcode=1732020500692 2024-11-19T12:48:21,156 DEBUG [RS:0;aba5a916dfea:43139 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:48:21,158 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48677, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:48:21,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40617 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,158 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=40617 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,160 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0 2024-11-19T12:48:21,160 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36925 2024-11-19T12:48:21,160 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:48:21,171 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:48:21,171 DEBUG [RS:0;aba5a916dfea:43139 {}] zookeeper.ZKUtil(111): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,172 WARN [RS:0;aba5a916dfea:43139 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:48:21,172 INFO [RS:0;aba5a916dfea:43139 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:48:21,172 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,172 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,43139,1732020500692] 2024-11-19T12:48:21,176 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:48:21,178 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:48:21,178 INFO [RS:0;aba5a916dfea:43139 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:48:21,178 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
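The FSHLog provider instantiated here rolls WALs using the sizing logged earlier for the master's local region (blocksize=256 MB, rollsize=128 MB, maxLogs=10). Below is a hedged sketch of the configuration keys involved, named as I understand them; the logged values likely just reflect defaults in this test, since the WAL block size is normally derived from the filesystem block size and the roll size from a 0.5 multiplier of it:

```java
import org.apache.hadoop.conf.Configuration;

public class WalSizingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024); // WAL block size
    conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);          // roll at half the block size
    conf.setInt("hbase.regionserver.maxlogs", 10);                         // flush regions once 10 WALs pile up

    long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 0);
    float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
    System.out.println("roll size = " + (long) (blockSize * multiplier));  // 134217728 (128 MB)
  }
}
```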
2024-11-19T12:48:21,179 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:48:21,179 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:48:21,180 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,180 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,181 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,181 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:48:21,181 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:48:21,181 DEBUG [RS:0;aba5a916dfea:43139 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,183 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,43139,1732020500692-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:48:21,208 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:48:21,208 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,43139,1732020500692-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,208 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,208 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.Replication(171): aba5a916dfea,43139,1732020500692 started 2024-11-19T12:48:21,219 WARN [aba5a916dfea:40617 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:48:21,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:21,223 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:21,226 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:48:21,226 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,43139,1732020500692, RpcServer on aba5a916dfea/172.17.0.2:43139, sessionid=0x101546d0af60001 2024-11-19T12:48:21,226 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:48:21,226 DEBUG [RS:0;aba5a916dfea:43139 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,226 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,43139,1732020500692' 2024-11-19T12:48:21,226 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,43139,1732020500692' 2024-11-19T12:48:21,227 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:48:21,228 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:48:21,228 DEBUG [RS:0;aba5a916dfea:43139 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:48:21,228 INFO [RS:0;aba5a916dfea:43139 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:48:21,228 INFO [RS:0;aba5a916dfea:43139 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-19T12:48:21,330 INFO [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C43139%2C1732020500692, suffix=, logDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692, archiveDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs, maxLogs=32 2024-11-19T12:48:21,331 INFO [RS:0;aba5a916dfea:43139 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43139%2C1732020500692.1732020501331 2024-11-19T12:48:21,337 INFO [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020501331 2024-11-19T12:48:21,339 DEBUG [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44149:44149),(127.0.0.1/127.0.0.1:35713:35713)] 2024-11-19T12:48:21,470 DEBUG [aba5a916dfea:40617 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:48:21,470 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,472 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,43139,1732020500692, state=OPENING 2024-11-19T12:48:21,522 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:48:21,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:21,530 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:48:21,531 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:48:21,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:48:21,531 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:48:21,531 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,43139,1732020500692}] 2024-11-19T12:48:21,685 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:48:21,686 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37797, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:48:21,690 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:48:21,690 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:48:21,692 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C43139%2C1732020500692.meta, suffix=.meta, logDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692, archiveDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs, maxLogs=32 2024-11-19T12:48:21,692 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43139%2C1732020500692.meta.1732020501692.meta 2024-11-19T12:48:21,697 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.meta.1732020501692.meta 2024-11-19T12:48:21,698 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44149:44149),(127.0.0.1/127.0.0.1:35713:35713)] 2024-11-19T12:48:21,699 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:48:21,699 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:48:21,699 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:48:21,700 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-19T12:48:21,700 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:48:21,700 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:21,700 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:48:21,700 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:48:21,701 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:48:21,702 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:48:21,702 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,703 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:48:21,704 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:48:21,704 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,704 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:48:21,705 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:48:21,705 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,705 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:48:21,706 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:48:21,706 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:48:21,706 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,707 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T12:48:21,707 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:48:21,707 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740 2024-11-19T12:48:21,708 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740 2024-11-19T12:48:21,710 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:48:21,710 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:48:21,710 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:48:21,712 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:48:21,713 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=744140, jitterRate=-0.0537775456905365}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:48:21,713 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:48:21,714 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020501700Writing region info on filesystem at 1732020501700Initializing all the Stores at 1732020501701 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501701Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501701Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020501701Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020501701Cleaning up temporary data from old regions at 1732020501710 (+9 ms)Running coprocessor post-open hooks at 1732020501713 (+3 ms)Region opened successfully at 1732020501714 (+1 ms) 2024-11-19T12:48:21,716 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020501685 2024-11-19T12:48:21,719 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:48:21,719 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:48:21,720 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,721 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,43139,1732020500692, state=OPEN 2024-11-19T12:48:21,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:48:21,761 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:48:21,761 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:21,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:48:21,761 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:48:21,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:48:21,764 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,43139,1732020500692 in 230 msec 2024-11-19T12:48:21,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:48:21,768 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 699 msec 2024-11-19T12:48:21,769 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute 
pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:48:21,769 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:48:21,770 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:48:21,771 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,43139,1732020500692, seqNum=-1] 2024-11-19T12:48:21,771 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:48:21,772 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37031, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:48:21,779 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 752 msec 2024-11-19T12:48:21,779 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020501779, completionTime=-1 2024-11-19T12:48:21,780 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:48:21,780 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020561782 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020621782 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:40617, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:48:21,782 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,783 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:48:21,785 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.033sec 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:48:21,788 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:48:21,791 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:48:21,791 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:48:21,791 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40617,1732020500545-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:48:21,819 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1150b68a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:48:21,819 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,40617,-1 for getting cluster id 2024-11-19T12:48:21,820 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:48:21,822 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '957e2a71-d987-4072-9c6e-f2fd67e59ac0' 2024-11-19T12:48:21,822 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:48:21,823 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "957e2a71-d987-4072-9c6e-f2fd67e59ac0" 2024-11-19T12:48:21,823 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@bedd455, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:48:21,823 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,40617,-1] 2024-11-19T12:48:21,823 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:48:21,824 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:48:21,825 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54962, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:48:21,826 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2c4d7e53, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:48:21,827 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:48:21,828 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,43139,1732020500692, seqNum=-1] 2024-11-19T12:48:21,829 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:48:21,830 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:53532, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:48:21,832 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,40617,1732020500545 2024-11-19T12:48:21,833 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:48:21,836 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:48:21,836 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:48:21,837 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,40617,1732020500545 2024-11-19T12:48:21,837 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@779f45a 2024-11-19T12:48:21,838 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:48:21,839 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54966, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:48:21,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T12:48:21,839 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-19T12:48:21,840 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:48:21,841 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:21,843 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:48:21,843 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:21,843 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-19T12:48:21,844 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:48:21,844 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure 
table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:48:21,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741835_1011 (size=405) 2024-11-19T12:48:21,858 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741835_1011 (size=405) 2024-11-19T12:48:21,860 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 76bd82c964904a115ac489026eff1e82, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0 2024-11-19T12:48:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741836_1012 (size=88) 2024-11-19T12:48:21,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741836_1012 (size=88) 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 76bd82c964904a115ac489026eff1e82, disabling compactions & flushes 2024-11-19T12:48:21,868 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. after waiting 0 ms 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:48:21,868 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:21,868 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 76bd82c964904a115ac489026eff1e82: Waiting for close lock at 1732020501868Disabling compacts and flushes for region at 1732020501868Disabling writes for close at 1732020501868Writing region close event to WAL at 1732020501868Closed at 1732020501868 2024-11-19T12:48:21,870 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:48:21,870 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732020501870"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020501870"}]},"ts":"1732020501870"} 2024-11-19T12:48:21,872 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-19T12:48:21,874 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:48:21,874 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020501874"}]},"ts":"1732020501874"} 2024-11-19T12:48:21,876 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-19T12:48:21,877 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76bd82c964904a115ac489026eff1e82, ASSIGN}] 2024-11-19T12:48:21,878 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76bd82c964904a115ac489026eff1e82, ASSIGN 2024-11-19T12:48:21,879 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76bd82c964904a115ac489026eff1e82, ASSIGN; state=OFFLINE, location=aba5a916dfea,43139,1732020500692; forceNewPlan=false, retain=false 2024-11-19T12:48:22,030 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=76bd82c964904a115ac489026eff1e82, regionState=OPENING, regionLocation=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:22,032 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] 
procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76bd82c964904a115ac489026eff1e82, ASSIGN because future has completed 2024-11-19T12:48:22,032 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76bd82c964904a115ac489026eff1e82, server=aba5a916dfea,43139,1732020500692}] 2024-11-19T12:48:22,189 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:22,189 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 76bd82c964904a115ac489026eff1e82, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:48:22,189 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,190 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:48:22,190 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,190 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,191 INFO [StoreOpener-76bd82c964904a115ac489026eff1e82-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,193 INFO [StoreOpener-76bd82c964904a115ac489026eff1e82-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 76bd82c964904a115ac489026eff1e82 columnFamilyName info 2024-11-19T12:48:22,193 DEBUG [StoreOpener-76bd82c964904a115ac489026eff1e82-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker 
impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:48:22,193 INFO [StoreOpener-76bd82c964904a115ac489026eff1e82-1 {}] regionserver.HStore(327): Store=76bd82c964904a115ac489026eff1e82/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:48:22,193 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,194 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,194 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,195 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,195 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,197 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,199 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:48:22,200 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 76bd82c964904a115ac489026eff1e82; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=784374, jitterRate=-0.0026175975799560547}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:48:22,200 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:48:22,201 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 76bd82c964904a115ac489026eff1e82: Running coprocessor pre-open hook at 1732020502190Writing region info on filesystem at 1732020502190Initializing all the Stores at 1732020502191 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020502191Cleaning up temporary data from old regions at 1732020502195 (+4 ms)Running coprocessor post-open hooks at 1732020502200 (+5 ms)Region opened successfully at 1732020502201 (+1 ms) 2024-11-19T12:48:22,202 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82., pid=6, masterSystemTime=1732020502185 2024-11-19T12:48:22,205 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:22,205 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:22,206 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=76bd82c964904a115ac489026eff1e82, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,43139,1732020500692 2024-11-19T12:48:22,208 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 76bd82c964904a115ac489026eff1e82, server=aba5a916dfea,43139,1732020500692 because future has completed 2024-11-19T12:48:22,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:48:22,212 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 76bd82c964904a115ac489026eff1e82, server=aba5a916dfea,43139,1732020500692 in 178 msec 2024-11-19T12:48:22,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:48:22,215 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=76bd82c964904a115ac489026eff1e82, ASSIGN in 336 msec 2024-11-19T12:48:22,216 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:48:22,216 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020502216"}]},"ts":"1732020502216"} 2024-11-19T12:48:22,218 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-19T12:48:22,219 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:48:22,221 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, 
state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 379 msec 2024-11-19T12:48:22,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:22,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:23,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:23,224 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:23,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:48:23,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:48:23,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:48:23,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:48:23,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:23,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T12:48:24,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:24,225 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:25,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:25,226 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:26,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:26,227 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:26,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,701 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,702 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,723 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,724 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,728 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:26,730 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:27,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:27,235 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:48:27,236 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,237 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,238 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,266 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,267 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,268 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,272 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,273 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,275 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:48:27,281 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:48:27,282 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-19T12:48:28,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:28,228 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:29,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:29,229 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:30,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:30,230 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:31,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:31,231 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:31,949 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:48:31,949 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T12:48:31,949 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-19T12:48:31,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:31,953 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:31,956 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82., hostname=aba5a916dfea,43139,1732020500692, seqNum=2] 2024-11-19T12:48:31,963 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:31,967 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:31,969 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:48:31,969 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:48:31,970 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:48:31,972 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:48:32,133 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43139 {}] 
regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-19T12:48:32,134 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:32,134 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 76bd82c964904a115ac489026eff1e82 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T12:48:32,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/fdd16c74994b40d2b324f45f85ab2df1 is 1080, key is row0001/info:/1732020511957/Put/seqid=0 2024-11-19T12:48:32,157 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741837_1013 (size=6033) 2024-11-19T12:48:32,158 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741837_1013 (size=6033) 2024-11-19T12:48:32,158 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/fdd16c74994b40d2b324f45f85ab2df1 2024-11-19T12:48:32,165 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/fdd16c74994b40d2b324f45f85ab2df1 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1 2024-11-19T12:48:32,171 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1, entries=1, sequenceid=5, filesize=5.9 K 2024-11-19T12:48:32,172 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 38ms, sequenceid=5, compaction requested=false 2024-11-19T12:48:32,172 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 76bd82c964904a115ac489026eff1e82: 2024-11-19T12:48:32,172 DEBUG 
[RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:48:32,174 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-19T12:48:32,177 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-19T12:48:32,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T12:48:32,181 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 206 msec 2024-11-19T12:48:32,184 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 218 msec 2024-11-19T12:48:32,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:32,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:33,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:33,232 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:34,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:34,233 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:35,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:35,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:36,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:36,234 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:37,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:37,235 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:38,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:38,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:39,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:39,236 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:40,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:40,237 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:41,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:41,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:42,049 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-19T12:48:42,050 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T12:48:42,053 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:42,054 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-19T12:48:42,055 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:48:42,055 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:48:42,056 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:48:42,208 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43139 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-19T12:48:42,209 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:48:42,209 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 76bd82c964904a115ac489026eff1e82 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T12:48:42,214 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/af288f3b92394459b2998c5626c8b3af is 1080, key is row0002/info:/1732020522051/Put/seqid=0 2024-11-19T12:48:42,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741838_1014 (size=6033) 2024-11-19T12:48:42,221 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741838_1014 (size=6033) 2024-11-19T12:48:42,222 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/af288f3b92394459b2998c5626c8b3af 2024-11-19T12:48:42,229 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/af288f3b92394459b2998c5626c8b3af as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af 2024-11-19T12:48:42,236 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af, entries=1, sequenceid=9, filesize=5.9 K 2024-11-19T12:48:42,237 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 28ms, sequenceid=9, compaction requested=false 2024-11-19T12:48:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 76bd82c964904a115ac489026eff1e82: 2024-11-19T12:48:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:48:42,237 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-19T12:48:42,238 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-19T12:48:42,238 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:42,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:42,242 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-19T12:48:42,242 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-11-19T12:48:42,244 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-19T12:48:43,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:43,239 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:43,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta after 68054ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:43,240 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 after 68061ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-19T12:48:44,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:44,241 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:45,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:45,242 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:46,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:46,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:47,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:47,243 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:48,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:48,244 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:49,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:49,245 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:50,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:50,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:50,497 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:48:51,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:51,246 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:52,099 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-19T12:48:52,100 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T12:48:52,103 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43139%2C1732020500692.1732020532102 2024-11-19T12:48:52,108 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:52,108 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:52,108 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:52,108 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:52,108 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:48:52,108 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020501331 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020532102 2024-11-19T12:48:52,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44149:44149),(127.0.0.1/127.0.0.1:35713:35713)] 2024-11-19T12:48:52,109 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020501331 is not closed yet, will try archiving it next time 2024-11-19T12:48:52,110 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741833_1009 (size=5546) 2024-11-19T12:48:52,110 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:52,111 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741833_1009 (size=5546) 2024-11-19T12:48:52,112 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:48:52,113 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute 
state=FLUSH_TABLE_PREPARE 2024-11-19T12:48:52,113 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T12:48:52,114 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:48:52,114 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:48:52,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:52,247 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:52,268 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=43139 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-19T12:48:52,269 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
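The "Failed invocation ... java.io.IOException: Filesystem closed" WARN entries that repeat above, and keep repeating at roughly one-second intervals below, come from the WAL close path probing whether the previous writer's file has been closed on HDFS; the probe is made reflectively, so the InvocationTargetException merely wraps the real cause (the DFSClient behind that FileSystem instance has already been shut down). The following is a simplified, hypothetical sketch of that retry pattern for illustration only, not the actual RecoverLeaseFSUtils code; the helper name, the method lookup, and the one-second pause are assumptions.

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Simplified sketch of the retry loop behind the repeated WARN lines;
// hypothetical helper, not the real RecoverLeaseFSUtils implementation.
final class IsFileClosedProbe {
  static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    final long deadline = System.currentTimeMillis() + timeoutMs;
    final Method isFileClosed;
    try {
      // DistributedFileSystem#isFileClosed(Path) is looked up reflectively so the
      // caller also works against FileSystem implementations that lack the method.
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // probe unavailable; caller has to keep polling lease recovery instead
    }
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true; // lease recovery finished, file is closed
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // This is where "Failed invocation for <path>" style warnings originate,
        // e.g. when the wrapped cause is "java.io.IOException: Filesystem closed".
        System.err.println("Failed invocation for " + wal + ": " + e.getCause());
      }
      Thread.sleep(1000L); // matches the roughly one-second cadence in the log
    }
    return false;
  }
}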
2024-11-19T12:48:52,269 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 76bd82c964904a115ac489026eff1e82 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T12:48:52,276 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/0b9da20f838f46028229ee5583bba7b9 is 1080, key is row0003/info:/1732020532101/Put/seqid=0 2024-11-19T12:48:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741840_1016 (size=6033) 2024-11-19T12:48:52,292 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741840_1016 (size=6033) 2024-11-19T12:48:52,293 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/0b9da20f838f46028229ee5583bba7b9 2024-11-19T12:48:52,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/0b9da20f838f46028229ee5583bba7b9 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9 2024-11-19T12:48:52,305 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9, entries=1, sequenceid=13, filesize=5.9 K 2024-11-19T12:48:52,306 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 37ms, sequenceid=13, compaction requested=true 2024-11-19T12:48:52,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 76bd82c964904a115ac489026eff1e82: 2024-11-19T12:48:52,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
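For context, the FLUSH operation that pid=11/12 above carries out is the kind of request a client issues through the public Admin API; the RawAsyncHBaseAdmin lines earlier show the async client completing exactly such a call. A minimal client-side sketch, assuming an hbase-site.xml for this cluster is on the classpath; only the table name and the flush call itself are taken from the log, the rest is boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create(); // picks up hbase-site.xml
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Same table the test log is flushing; on the master this becomes a
      // FlushTableProcedure with a FlushRegionProcedure subprocedure per region.
      admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
    }
  }
}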
2024-11-19T12:48:52,306 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-19T12:48:52,307 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-19T12:48:52,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-19T12:48:52,311 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 194 msec 2024-11-19T12:48:52,313 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-19T12:48:53,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:53,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:54,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:54,248 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:55,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:55,249 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:56,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:56,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:57,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:57,250 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:58,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:58,251 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:48:59,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:48:59,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:00,252 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:00,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:01,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:01,253 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:02,189 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-19T12:49:02,189 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T12:49:02,189 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:02,191 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:02,191 DEBUG [Time-limited test {}] regionserver.HStore(1541): 76bd82c964904a115ac489026eff1e82/info is initiating minor compaction (all files) 2024-11-19T12:49:02,191 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:49:02,191 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:02,191 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 76bd82c964904a115ac489026eff1e82/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
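The minor compaction started here merges the three flushed HFiles listed just below into a single store file. Equivalent work can also be requested explicitly from a client; a minimal sketch using the public Admin API, assuming the table and the 'info' family seen in the log (the server-side policy, ExploringCompactionPolicy above, still chooses which files to merge):

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.util.Bytes;

public class RequestCompactionExample {
  public static void main(String[] args) throws Exception {
    TableName table =
        TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");
    try (Connection connection =
             ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Ask for a (minor) compaction of the 'info' family; file selection and
      // throughput limiting remain under the region server's control.
      admin.compact(table, Bytes.toBytes("info"));
      // admin.majorCompact(table) would instead request rewriting all store files.
    }
  }
}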
2024-11-19T12:49:02,192 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9] into tmpdir=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp, totalSize=17.7 K 2024-11-19T12:49:02,192 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting fdd16c74994b40d2b324f45f85ab2df1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732020511957 2024-11-19T12:49:02,193 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting af288f3b92394459b2998c5626c8b3af, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732020522051 2024-11-19T12:49:02,193 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 0b9da20f838f46028229ee5583bba7b9, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732020532101 2024-11-19T12:49:02,208 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 76bd82c964904a115ac489026eff1e82#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:02,208 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/601583c080714ac39ab7f50e2bbeb924 is 1080, key is row0001/info:/1732020511957/Put/seqid=0 2024-11-19T12:49:02,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741841_1017 (size=8296) 2024-11-19T12:49:02,218 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741841_1017 (size=8296) 2024-11-19T12:49:02,225 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/601583c080714ac39ab7f50e2bbeb924 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/601583c080714ac39ab7f50e2bbeb924 2024-11-19T12:49:02,233 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 76bd82c964904a115ac489026eff1e82/info of 76bd82c964904a115ac489026eff1e82 into 601583c080714ac39ab7f50e2bbeb924(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:02,233 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 76bd82c964904a115ac489026eff1e82: 2024-11-19T12:49:02,236 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43139%2C1732020500692.1732020542236 2024-11-19T12:49:02,244 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:02,244 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:02,244 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:02,244 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:02,244 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:02,244 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020532102 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020542236 2024-11-19T12:49:02,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741839_1015 (size=2520) 2024-11-19T12:49:02,246 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741839_1015 (size=2520) 2024-11-19T12:49:02,247 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44149:44149),(127.0.0.1/127.0.0.1:35713:35713)] 2024-11-19T12:49:02,247 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020501331 to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs/aba5a916dfea%2C43139%2C1732020500692.1732020501331 2024-11-19T12:49:02,248 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:49:02,249 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:49:02,250 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T12:49:02,250 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-19T12:49:02,251 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-19T12:49:02,251 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-19T12:49:02,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:02,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:02,404 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=43139 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-19T12:49:02,405 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:49:02,405 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 76bd82c964904a115ac489026eff1e82 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T12:49:02,412 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/e5dad64d88f64ce89283b7c7d18c331d is 1080, key is row0000/info:/1732020542235/Put/seqid=0 2024-11-19T12:49:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741843_1019 (size=6033) 2024-11-19T12:49:02,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741843_1019 (size=6033) 2024-11-19T12:49:02,421 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/e5dad64d88f64ce89283b7c7d18c331d 2024-11-19T12:49:02,427 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/e5dad64d88f64ce89283b7c7d18c331d as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/e5dad64d88f64ce89283b7c7d18c331d 2024-11-19T12:49:02,433 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/e5dad64d88f64ce89283b7c7d18c331d, entries=1, sequenceid=18, filesize=5.9 K 2024-11-19T12:49:02,434 INFO [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 29ms, sequenceid=18, compaction requested=false 2024-11-19T12:49:02,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 76bd82c964904a115ac489026eff1e82: 2024-11-19T12:49:02,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:49:02,435 DEBUG [RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-19T12:49:02,435 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-19T12:49:02,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-19T12:49:02,439 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 185 msec 2024-11-19T12:49:02,442 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 192 msec 2024-11-19T12:49:02,442 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T12:49:02,442 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T12:49:03,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:03,254 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:04,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:04,255 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:05,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:05,256 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:06,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:06,257 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:07,190 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 76bd82c964904a115ac489026eff1e82, had cached 0 bytes from a total of 14329 2024-11-19T12:49:07,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:07,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:08,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:08,258 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:09,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:09,259 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:10,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:10,260 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:11,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:11,261 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:12,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:12,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:12,269 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=40617 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-19T12:49:12,269 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-19T12:49:12,272 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C43139%2C1732020500692.1732020552272 2024-11-19T12:49:12,279 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,279 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,279 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,279 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,279 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,280 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020542236 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020552272 2024-11-19T12:49:12,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35713:35713),(127.0.0.1/127.0.0.1:44149:44149)] 2024-11-19T12:49:12,280 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020542236 is not closed yet, will try archiving it next time 2024-11-19T12:49:12,281 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:49:12,281 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/WALs/aba5a916dfea,43139,1732020500692/aba5a916dfea%2C43139%2C1732020500692.1732020532102 to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs/aba5a916dfea%2C43139%2C1732020500692.1732020532102 2024-11-19T12:49:12,281 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:49:12,281 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:49:12,281 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:12,281 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:12,281 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741842_1018 (size=2026) 2024-11-19T12:49:12,281 INFO [Registry-endpoints-refresh-end-points {}] 
client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:49:12,281 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:49:12,282 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741842_1018 (size=2026) 2024-11-19T12:49:12,282 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1072930910, stopped=false 2024-11-19T12:49:12,282 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,40617,1732020500545 2024-11-19T12:49:12,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:49:12,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:49:12,331 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:49:12,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:12,331 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:12,331 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
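Every "Call stack:" dump in this block bottoms out in AbstractTestLogRolling.tearDown calling HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection, then asks the master to shut down, and finally stops the region server and the backing DFS/ZK miniclusters. A minimal JUnit 4 teardown with that shape could look like the sketch below; TEST_UTIL is an assumed field name, and only the class and method names are taken from the stack traces in this log:

  import org.apache.hadoop.hbase.HBaseTestingUtil;
  import org.junit.After;

  public abstract class LogRollingTeardownSketch {
    // assumed field name; the real test keeps an equivalent shared utility instance
    protected static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

    @After
    public void tearDown() throws Exception {
      // closes the cluster connection, shuts down master and region server, then mini DFS/ZK
      TEST_UTIL.shutdownMiniCluster();
    }
  }
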
2024-11-19T12:49:12,331 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:49:12,331 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:12,332 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,43139,1732020500692' ***** 2024-11-19T12:49:12,332 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:49:12,332 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:49:12,332 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:49:12,332 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:49:12,332 INFO [RS:0;aba5a916dfea:43139 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:49:12,332 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:49:12,332 INFO [RS:0;aba5a916dfea:43139 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:49:12,332 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(3091): Received CLOSE for 76bd82c964904a115ac489026eff1e82 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,43139,1732020500692 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:43139. 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 76bd82c964904a115ac489026eff1e82, disabling compactions & flushes 2024-11-19T12:49:12,333 DEBUG [RS:0;aba5a916dfea:43139 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:49:12,333 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:49:12,333 DEBUG [RS:0;aba5a916dfea:43139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. after waiting 0 ms 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:49:12,333 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 76bd82c964904a115ac489026eff1e82 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-19T12:49:12,333 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-19T12:49:12,333 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1325): Online Regions={76bd82c964904a115ac489026eff1e82=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:49:12,333 DEBUG [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 76bd82c964904a115ac489026eff1e82 2024-11-19T12:49:12,333 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:49:12,333 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:49:12,334 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:49:12,334 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:49:12,334 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-19T12:49:12,338 DEBUG 
[RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/36b3cf0eb36447f2a1082b079ac0f82e is 1080, key is row0001/info:/1732020552271/Put/seqid=0 2024-11-19T12:49:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741845_1021 (size=6033) 2024-11-19T12:49:12,343 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741845_1021 (size=6033) 2024-11-19T12:49:12,344 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/36b3cf0eb36447f2a1082b079ac0f82e 2024-11-19T12:49:12,351 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/.tmp/info/36b3cf0eb36447f2a1082b079ac0f82e as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/36b3cf0eb36447f2a1082b079ac0f82e 2024-11-19T12:49:12,352 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/info/38c09934d9924a09906c5538a2c8bd38 is 227, key is TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82./info:regioninfo/1732020502206/Put/seqid=0 2024-11-19T12:49:12,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741846_1022 (size=7308) 2024-11-19T12:49:12,366 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/36b3cf0eb36447f2a1082b079ac0f82e, entries=1, sequenceid=22, filesize=5.9 K 2024-11-19T12:49:12,366 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741846_1022 (size=7308) 2024-11-19T12:49:12,366 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/info/38c09934d9924a09906c5538a2c8bd38 2024-11-19T12:49:12,367 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] 
regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 34ms, sequenceid=22, compaction requested=true 2024-11-19T12:49:12,368 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9] to archive 2024-11-19T12:49:12,368 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:49:12,370 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1 to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/fdd16c74994b40d2b324f45f85ab2df1 2024-11-19T12:49:12,371 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/af288f3b92394459b2998c5626c8b3af 2024-11-19T12:49:12,372 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9 to hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/info/0b9da20f838f46028229ee5583bba7b9 2024-11-19T12:49:12,373 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=aba5a916dfea:40617 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-19T12:49:12,373 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [fdd16c74994b40d2b324f45f85ab2df1=6033, af288f3b92394459b2998c5626c8b3af=6033, 0b9da20f838f46028229ee5583bba7b9=6033] 2024-11-19T12:49:12,378 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/76bd82c964904a115ac489026eff1e82/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-19T12:49:12,379 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 2024-11-19T12:49:12,379 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 76bd82c964904a115ac489026eff1e82: Waiting for close lock at 1732020552333Running coprocessor pre-close hooks at 1732020552333Disabling compacts and flushes for region at 1732020552333Disabling writes for close at 1732020552333Obtaining lock to block concurrent updates at 1732020552333Preparing flush snapshotting stores in 76bd82c964904a115ac489026eff1e82 at 1732020552333Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732020552333Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. at 1732020552334 (+1 ms)Flushing 76bd82c964904a115ac489026eff1e82/info: creating writer at 1732020552334Flushing 76bd82c964904a115ac489026eff1e82/info: appending metadata at 1732020552338 (+4 ms)Flushing 76bd82c964904a115ac489026eff1e82/info: closing flushed file at 1732020552338Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@14007652: reopening flushed file at 1732020552350 (+12 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 76bd82c964904a115ac489026eff1e82 in 34ms, sequenceid=22, compaction requested=true at 1732020552367 (+17 ms)Writing region close event to WAL at 1732020552374 (+7 ms)Running coprocessor post-close hooks at 1732020552379 (+5 ms)Closed at 1732020552379 2024-11-19T12:49:12,379 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732020501839.76bd82c964904a115ac489026eff1e82. 
2024-11-19T12:49:12,386 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/ns/7f346f1e93584542990a80e07bfd69e9 is 43, key is default/ns:d/1732020501773/Put/seqid=0 2024-11-19T12:49:12,390 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741847_1023 (size=5153) 2024-11-19T12:49:12,391 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741847_1023 (size=5153) 2024-11-19T12:49:12,391 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/ns/7f346f1e93584542990a80e07bfd69e9 2024-11-19T12:49:12,412 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/table/e36f25aa3bcf4a5da7dd8c0e4e3236c8 is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732020502216/Put/seqid=0 2024-11-19T12:49:12,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741848_1024 (size=5508) 2024-11-19T12:49:12,420 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741848_1024 (size=5508) 2024-11-19T12:49:12,420 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/table/e36f25aa3bcf4a5da7dd8c0e4e3236c8 2024-11-19T12:49:12,427 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/info/38c09934d9924a09906c5538a2c8bd38 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/info/38c09934d9924a09906c5538a2c8bd38 2024-11-19T12:49:12,433 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/info/38c09934d9924a09906c5538a2c8bd38, entries=10, sequenceid=11, filesize=7.1 K 2024-11-19T12:49:12,434 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/ns/7f346f1e93584542990a80e07bfd69e9 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/ns/7f346f1e93584542990a80e07bfd69e9 2024-11-19T12:49:12,439 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/ns/7f346f1e93584542990a80e07bfd69e9, entries=2, sequenceid=11, filesize=5.0 K 2024-11-19T12:49:12,440 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/.tmp/table/e36f25aa3bcf4a5da7dd8c0e4e3236c8 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/table/e36f25aa3bcf4a5da7dd8c0e4e3236c8 2024-11-19T12:49:12,446 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/table/e36f25aa3bcf4a5da7dd8c0e4e3236c8, entries=2, sequenceid=11, filesize=5.4 K 2024-11-19T12:49:12,447 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false 2024-11-19T12:49:12,452 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-19T12:49:12,452 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:49:12,452 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:49:12,453 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020552333Running coprocessor pre-close hooks at 1732020552333Disabling compacts and flushes for region at 1732020552333Disabling writes for close at 1732020552334 (+1 ms)Obtaining lock to block concurrent updates at 1732020552334Preparing flush snapshotting stores in 1588230740 at 1732020552334Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732020552334Flushing stores of hbase:meta,,1.1588230740 at 1732020552335 (+1 ms)Flushing 1588230740/info: creating writer at 1732020552335Flushing 1588230740/info: appending metadata at 1732020552351 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732020552351Flushing 1588230740/ns: creating writer at 1732020552372 (+21 ms)Flushing 1588230740/ns: appending metadata at 1732020552386 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732020552386Flushing 1588230740/table: creating writer at 1732020552396 (+10 ms)Flushing 1588230740/table: appending metadata at 1732020552411 (+15 ms)Flushing 1588230740/table: closing flushed file at 1732020552411Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5f312040: reopening flushed file at 1732020552426 (+15 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3a857573: reopening flushed file at 1732020552433 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@33178726: reopening flushed file at 1732020552439 (+6 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 113ms, sequenceid=11, compaction requested=false at 1732020552447 (+8 ms)Writing region close event to WAL at 1732020552448 (+1 ms)Running coprocessor post-close hooks at 1732020552452 (+4 ms)Closed at 1732020552452 2024-11-19T12:49:12,453 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:49:12,534 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,43139,1732020500692; all regions closed. 2024-11-19T12:49:12,534 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,535 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,535 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,535 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,535 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,538 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741834_1010 (size=3306) 2024-11-19T12:49:12,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741834_1010 (size=3306) 2024-11-19T12:49:12,545 DEBUG [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs 2024-11-19T12:49:12,545 INFO [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C43139%2C1732020500692.meta:.meta(num 1732020501692) 2024-11-19T12:49:12,545 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,545 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,545 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,545 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,546 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,547 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741844_1020 (size=1252) 2024-11-19T12:49:12,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741844_1020 (size=1252) 2024-11-19T12:49:12,551 DEBUG [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C43139%2C1732020500692:(num 1732020552272) 2024-11-19T12:49:12,551 DEBUG [RS:0;aba5a916dfea:43139 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:49:12,551 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:49:12,551 INFO [RS:0;aba5a916dfea:43139 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43139 2024-11-19T12:49:12,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,43139,1732020500692 2024-11-19T12:49:12,585 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:49:12,586 INFO [RS:0;aba5a916dfea:43139 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:49:12,594 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,43139,1732020500692] 2024-11-19T12:49:12,602 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,43139,1732020500692 already deleted, retry=false 2024-11-19T12:49:12,602 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,43139,1732020500692 expired; onlineServers=0 2024-11-19T12:49:12,602 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,40617,1732020500545' ***** 2024-11-19T12:49:12,602 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:49:12,602 INFO [M:0;aba5a916dfea:40617 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:49:12,602 INFO [M:0;aba5a916dfea:40617 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:49:12,602 DEBUG [M:0;aba5a916dfea:40617 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:49:12,602 DEBUG [M:0;aba5a916dfea:40617 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:49:12,602 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
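Earlier in this block the region server's meta WAL and default WAL were closed and their remaining files moved under oldWALs ("Moved 1 WAL file(s) ...", "Moved 2 WAL file(s) ..."). Assuming the NameNode URI and test-data root that appear throughout this log, the archived WALs could be listed straight from HDFS with a sketch like this (the plain Configuration is illustrative only):

  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  static void listArchivedWals() throws Exception {
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:36925"), new Configuration());
    Path oldWals =
        new Path("/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/oldWALs");
    // Each entry here is a WAL file that AbstractFSWAL reported as moved to oldWALs above.
    for (FileStatus f : fs.listStatus(oldWals)) {
      System.out.println(f.getPath().getName() + " (" + f.getLen() + " bytes)");
    }
  }
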
2024-11-19T12:49:12,602 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020501032 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020501032,5,FailOnTimeoutGroup] 2024-11-19T12:49:12,602 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020501032 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020501032,5,FailOnTimeoutGroup] 2024-11-19T12:49:12,603 INFO [M:0;aba5a916dfea:40617 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:49:12,603 INFO [M:0;aba5a916dfea:40617 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:49:12,603 DEBUG [M:0;aba5a916dfea:40617 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:49:12,603 INFO [M:0;aba5a916dfea:40617 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:49:12,603 INFO [M:0;aba5a916dfea:40617 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:49:12,603 INFO [M:0;aba5a916dfea:40617 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:49:12,603 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:49:12,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:49:12,614 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:12,614 DEBUG [M:0;aba5a916dfea:40617 {}] zookeeper.ZKUtil(347): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:49:12,615 WARN [M:0;aba5a916dfea:40617 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:49:12,615 INFO [M:0;aba5a916dfea:40617 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/.lastflushedseqids 2024-11-19T12:49:12,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741849_1025 (size=130) 2024-11-19T12:49:12,622 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741849_1025 (size=130) 2024-11-19T12:49:12,623 INFO [M:0;aba5a916dfea:40617 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:49:12,623 INFO [M:0;aba5a916dfea:40617 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:49:12,623 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:49:12,623 INFO [M:0;aba5a916dfea:40617 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:12,623 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:12,623 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:49:12,623 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:12,624 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.60 KB heapSize=55.01 KB 2024-11-19T12:49:12,644 DEBUG [M:0;aba5a916dfea:40617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/509f132cca9f4681b2da46166444a568 is 82, key is hbase:meta,,1/info:regioninfo/1732020501720/Put/seqid=0 2024-11-19T12:49:12,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741850_1026 (size=5672) 2024-11-19T12:49:12,648 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741850_1026 (size=5672) 2024-11-19T12:49:12,649 INFO [M:0;aba5a916dfea:40617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/509f132cca9f4681b2da46166444a568 2024-11-19T12:49:12,670 DEBUG [M:0;aba5a916dfea:40617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa018f0a683442399cebdda2c2a34604 is 799, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020502220/Put/seqid=0 2024-11-19T12:49:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741851_1027 (size=7824) 2024-11-19T12:49:12,675 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741851_1027 (size=7824) 2024-11-19T12:49:12,675 INFO [M:0;aba5a916dfea:40617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=43.00 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa018f0a683442399cebdda2c2a34604 2024-11-19T12:49:12,679 INFO [M:0;aba5a916dfea:40617 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fa018f0a683442399cebdda2c2a34604 2024-11-19T12:49:12,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:49:12,694 INFO [RS:0;aba5a916dfea:43139 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:49:12,694 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:43139-0x101546d0af60001, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:49:12,694 INFO [RS:0;aba5a916dfea:43139 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,43139,1732020500692; zookeeper connection closed. 2024-11-19T12:49:12,694 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@6b2a17bf {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@6b2a17bf 2024-11-19T12:49:12,695 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:49:12,696 DEBUG [M:0;aba5a916dfea:40617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3c181f02d134b98b970efd275aada3b is 69, key is aba5a916dfea,43139,1732020500692/rs:state/1732020501159/Put/seqid=0 2024-11-19T12:49:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741852_1028 (size=5156) 2024-11-19T12:49:12,700 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741852_1028 (size=5156) 2024-11-19T12:49:12,701 INFO [M:0;aba5a916dfea:40617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3c181f02d134b98b970efd275aada3b 2024-11-19T12:49:12,722 DEBUG [M:0;aba5a916dfea:40617 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6095b8d95b78451a9788c7f4ae615b0c is 52, key is load_balancer_on/state:d/1732020501834/Put/seqid=0 2024-11-19T12:49:12,726 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741853_1029 (size=5056) 2024-11-19T12:49:12,727 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741853_1029 (size=5056) 2024-11-19T12:49:12,727 INFO [M:0;aba5a916dfea:40617 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6095b8d95b78451a9788c7f4ae615b0c 2024-11-19T12:49:12,733 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/509f132cca9f4681b2da46166444a568 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/509f132cca9f4681b2da46166444a568 2024-11-19T12:49:12,740 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/509f132cca9f4681b2da46166444a568, entries=8, sequenceid=121, filesize=5.5 K 2024-11-19T12:49:12,741 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/fa018f0a683442399cebdda2c2a34604 as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fa018f0a683442399cebdda2c2a34604 2024-11-19T12:49:12,746 INFO [M:0;aba5a916dfea:40617 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for fa018f0a683442399cebdda2c2a34604 2024-11-19T12:49:12,746 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/fa018f0a683442399cebdda2c2a34604, entries=14, sequenceid=121, filesize=7.6 K 2024-11-19T12:49:12,747 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/e3c181f02d134b98b970efd275aada3b as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3c181f02d134b98b970efd275aada3b 2024-11-19T12:49:12,752 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/e3c181f02d134b98b970efd275aada3b, entries=1, sequenceid=121, filesize=5.0 K 2024-11-19T12:49:12,753 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/6095b8d95b78451a9788c7f4ae615b0c as hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6095b8d95b78451a9788c7f4ae615b0c 2024-11-19T12:49:12,759 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36925/user/jenkins/test-data/6b56602a-9db6-9f0f-01c0-56066ab575c0/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/6095b8d95b78451a9788c7f4ae615b0c, entries=1, sequenceid=121, filesize=4.9 K 2024-11-19T12:49:12,760 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=121, compaction requested=false 2024-11-19T12:49:12,761 INFO [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:49:12,762 DEBUG [M:0;aba5a916dfea:40617 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020552623Disabling compacts and flushes for region at 1732020552623Disabling writes for close at 1732020552623Obtaining lock to block concurrent updates at 1732020552624 (+1 ms)Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020552624Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44650, getHeapSize=56264, getOffHeapSize=0, getCellsCount=140 at 1732020552624Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020552625 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020552625Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020552643 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020552643Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020552654 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020552670 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020552670Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020552680 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020552695 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020552695Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020552706 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020552721 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020552722 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7443aecc: reopening flushed file at 1732020552732 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@546f392a: reopening flushed file at 1732020552740 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1a6c6b34: reopening flushed file at 1732020552746 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@277db228: reopening flushed file at 1732020552752 (+6 ms)Finished flush of dataSize ~43.60 KB/44650, heapSize ~54.95 KB/56264, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 137ms, sequenceid=121, compaction requested=false at 1732020552760 (+8 ms)Writing region close event to WAL at 1732020552761 (+1 ms)Closed at 1732020552761 2024-11-19T12:49:12,762 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,762 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,762 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,762 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,762 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:49:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39143 is added to blk_1073741830_1006 (size=53047) 2024-11-19T12:49:12,764 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40915 is added to blk_1073741830_1006 (size=53047) 2024-11-19T12:49:12,765 INFO [M:0;aba5a916dfea:40617 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-19T12:49:12,765 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:49:12,765 INFO [M:0;aba5a916dfea:40617 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40617 2024-11-19T12:49:12,765 INFO [M:0;aba5a916dfea:40617 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:49:12,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:49:12,894 INFO [M:0;aba5a916dfea:40617 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:49:12,894 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:40617-0x101546d0af60000, quorum=127.0.0.1:63076, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:49:12,935 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@22de9ffd{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:49:12,935 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@11be9bab{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:49:12,935 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:49:12,935 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5332e2ad{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:49:12,935 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f5820{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,STOPPED} 2024-11-19T12:49:12,937 WARN [BP-881519828-172.17.0.2-1732020498751 heartbeating to localhost/127.0.0.1:36925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:49:12,937 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:49:12,937 WARN [BP-881519828-172.17.0.2-1732020498751 heartbeating to localhost/127.0.0.1:36925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-881519828-172.17.0.2-1732020498751 (Datanode Uuid 78a264eb-ae5a-4f93-a93b-43a9b49ad14c) service to localhost/127.0.0.1:36925 2024-11-19T12:49:12,937 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:49:12,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data3/current/BP-881519828-172.17.0.2-1732020498751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:49:12,938 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data4/current/BP-881519828-172.17.0.2-1732020498751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:49:12,938 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:49:12,940 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a4ab4a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:49:12,941 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2fb77c6e{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:49:12,941 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:49:12,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7a6a3c52{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:49:12,941 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4369974{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,STOPPED} 2024-11-19T12:49:12,943 WARN [BP-881519828-172.17.0.2-1732020498751 heartbeating to localhost/127.0.0.1:36925 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:49:12,943 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:49:12,943 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:49:12,943 WARN [BP-881519828-172.17.0.2-1732020498751 heartbeating to localhost/127.0.0.1:36925 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-881519828-172.17.0.2-1732020498751 (Datanode Uuid 7f60838b-cb9c-49ce-9d95-5c41fcb53d6c) service to localhost/127.0.0.1:36925 2024-11-19T12:49:12,943 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data1/current/BP-881519828-172.17.0.2-1732020498751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:49:12,944 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/cluster_54284d96-724e-3688-d80c-22639cb0c993/data/data2/current/BP-881519828-172.17.0.2-1732020498751 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:49:12,944 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:49:12,950 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@287bf7ab{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:49:12,951 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@dfb0819{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:49:12,951 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:49:12,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@610a5e11{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:49:12,951 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@47aef258{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir/,STOPPED} 2024-11-19T12:49:12,957 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:49:12,977 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:49:12,984 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/aba5a916dfea:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:36925 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36925 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36925 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 
java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36925 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) 
app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36925 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36925 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36925 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36925 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:36925 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) 
app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 186) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5615 (was 5843) 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=5615 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.log.dir so I do NOT create it in target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/db6ca39d-7bbd-1fb3-294a-2e1b326e366b/hadoop.tmp.dir so I do NOT create it in target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f, deleteOnExit=true 2024-11-19T12:49:12,992 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/test.cache.data in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:49:12,993 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:49:12,993 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/dfs.datanode.shared.file.descriptor.paths in system properties and HBase 
conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:49:12,994 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:49:13,007 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:49:13,193 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:49:13,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:13,262 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:13,272 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:49:13,277 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:49:13,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:49:13,278 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:49:13,278 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:49:13,279 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:49:13,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6aaaafb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:49:13,279 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@621c58fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:49:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:49:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:49:13,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:49:13,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-19T12:49:13,384 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7700407f{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/java.io.tmpdir/jetty-localhost-33685-hadoop-hdfs-3_4_1-tests_jar-_-any-15283099293368427475/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:49:13,385 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e430f9{HTTP/1.1, (http/1.1)}{localhost:33685} 2024-11-19T12:49:13,385 INFO [Time-limited test {}] server.Server(415): Started @247083ms 2024-11-19T12:49:13,398 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 
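The "Failed invocation" warnings a few records above come from a reflective call: the stack shows isFileClosed being invoked through Method.invoke, so the real "Filesystem closed" IOException arrives wrapped in an InvocationTargetException and has to be unwrapped before the log message is meaningful. A minimal sketch of that unwrap pattern follows; the Target class, method, and path are stand-ins, not the actual RecoverLeaseFSUtils code.

```java
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/**
 * Minimal sketch (not RecoverLeaseFSUtils): invoke an isFileClosed-style method
 * reflectively and unwrap InvocationTargetException so the underlying cause
 * (here an IOException mimicking "Filesystem closed") is what gets reported.
 */
public class ReflectiveCall {
    /** Stand-in for a filesystem whose isFileClosed may only exist on some versions. */
    public static class Target {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed"); // mimic the failure seen above
        }
    }

    public static void main(String[] args) throws Exception {
        Target fs = new Target();
        Method m = Target.class.getMethod("isFileClosed", String.class);
        try {
            boolean closed = (Boolean) m.invoke(fs, "/some/wal/file");
            System.out.println("closed=" + closed);
        } catch (InvocationTargetException e) {
            // The interesting exception is the cause, not the reflective wrapper.
            Throwable cause = e.getCause();
            System.out.println("isFileClosed failed: " + cause);
        }
    }
}
```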
2024-11-19T12:49:13,606 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:49:13,609 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:49:13,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:49:13,610 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:49:13,610 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:49:13,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a86eb96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:49:13,611 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4a0bbcc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:49:13,716 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@637baa5c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/java.io.tmpdir/jetty-localhost-45517-hadoop-hdfs-3_4_1-tests_jar-_-any-2927812165170559022/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:49:13,716 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3c9ced57{HTTP/1.1, (http/1.1)}{localhost:45517} 2024-11-19T12:49:13,717 INFO [Time-limited test {}] server.Server(415): Started @247414ms 2024-11-19T12:49:13,718 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:49:13,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:49:13,760 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:49:13,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:49:13,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:49:13,767 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:49:13,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54e8b7e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:49:13,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d63e15b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:49:13,876 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2a188763{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/java.io.tmpdir/jetty-localhost-38553-hadoop-hdfs-3_4_1-tests_jar-_-any-2024820446906018330/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:49:13,876 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4f5b93c6{HTTP/1.1, (http/1.1)}{localhost:38553} 2024-11-19T12:49:13,876 INFO [Time-limited test {}] server.Server(415): Started @247574ms 2024-11-19T12:49:13,877 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:49:14,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:14,263 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:14,422 WARN [Thread-1963 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data1/current/BP-1592302674-172.17.0.2-1732020553018/current, will proceed with Du for space computation calculation, 2024-11-19T12:49:14,423 WARN [Thread-1964 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data2/current/BP-1592302674-172.17.0.2-1732020553018/current, will proceed with Du for space computation calculation, 2024-11-19T12:49:14,445 WARN [Thread-1928 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:49:14,447 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a41185856a7220f with lease ID 0x8f35af00c527732f: Processing first storage report for DS-629ebfb7-791f-41a2-a6e4-02de17e6d8e8 from datanode DatanodeRegistration(127.0.0.1:36813, datanodeUuid=74b7389d-2bdc-4c37-9033-4be9efb8b9c0, infoPort=46349, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018) 2024-11-19T12:49:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a41185856a7220f with lease ID 0x8f35af00c527732f: from storage DS-629ebfb7-791f-41a2-a6e4-02de17e6d8e8 node DatanodeRegistration(127.0.0.1:36813, datanodeUuid=74b7389d-2bdc-4c37-9033-4be9efb8b9c0, infoPort=46349, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:49:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2a41185856a7220f with lease ID 0x8f35af00c527732f: Processing first storage report for DS-23a54471-d103-41ea-8acf-218106ed87be from datanode DatanodeRegistration(127.0.0.1:36813, datanodeUuid=74b7389d-2bdc-4c37-9033-4be9efb8b9c0, infoPort=46349, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018) 2024-11-19T12:49:14,448 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2a41185856a7220f with lease ID 0x8f35af00c527732f: from storage DS-23a54471-d103-41ea-8acf-218106ed87be node DatanodeRegistration(127.0.0.1:36813, datanodeUuid=74b7389d-2bdc-4c37-9033-4be9efb8b9c0, infoPort=46349, infoSecurePort=0, ipcPort=33929, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:49:14,598 WARN [Thread-1975 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data3/current/BP-1592302674-172.17.0.2-1732020553018/current, will proceed with Du for space computation calculation, 2024-11-19T12:49:14,598 WARN [Thread-1976 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data4/current/BP-1592302674-172.17.0.2-1732020553018/current, will proceed with Du for space computation calculation, 2024-11-19T12:49:14,621 WARN [Thread-1951 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:49:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2346ccb9d3cd84fb with lease ID 0x8f35af00c5277330: Processing first storage report for DS-b5dda7fb-6a43-4f2a-896e-378db42a7968 from datanode DatanodeRegistration(127.0.0.1:32881, datanodeUuid=367e7123-590e-41c7-91a6-3064a29331a2, infoPort=35553, infoSecurePort=0, ipcPort=33259, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018) 2024-11-19T12:49:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2346ccb9d3cd84fb with lease ID 0x8f35af00c5277330: from storage DS-b5dda7fb-6a43-4f2a-896e-378db42a7968 node DatanodeRegistration(127.0.0.1:32881, datanodeUuid=367e7123-590e-41c7-91a6-3064a29331a2, infoPort=35553, infoSecurePort=0, ipcPort=33259, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:49:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x2346ccb9d3cd84fb with lease ID 0x8f35af00c5277330: Processing first storage report for DS-55b85cdf-9432-4f23-a9b5-929caf9267dc from datanode DatanodeRegistration(127.0.0.1:32881, datanodeUuid=367e7123-590e-41c7-91a6-3064a29331a2, infoPort=35553, infoSecurePort=0, ipcPort=33259, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018) 2024-11-19T12:49:14,623 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2346ccb9d3cd84fb with lease ID 0x8f35af00c5277330: from storage DS-55b85cdf-9432-4f23-a9b5-929caf9267dc node DatanodeRegistration(127.0.0.1:32881, datanodeUuid=367e7123-590e-41c7-91a6-3064a29331a2, infoPort=35553, infoSecurePort=0, ipcPort=33259, storageInfo=lv=-57;cid=testClusterID;nsid=1189660527;c=1732020553018), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:49:14,705 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138 2024-11-19T12:49:14,708 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/zookeeper_0, clientPort=56416, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:49:14,708 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=56416 2024-11-19T12:49:14,709 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,710 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:49:14,719 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:49:14,720 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c with version=8 2024-11-19T12:49:14,720 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:49:14,722 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:49:14,722 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:49:14,723 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:49:14,723 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42979 2024-11-19T12:49:14,725 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42979 connecting to ZooKeeper ensemble=127.0.0.1:56416 2024-11-19T12:49:14,861 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:429790x0, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-19T12:49:14,861 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42979-0x101546dde980000 connected 2024-11-19T12:49:14,926 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,929 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,932 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:49:14,932 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c, hbase.cluster.distributed=false 2024-11-19T12:49:14,934 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:49:14,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42979 2024-11-19T12:49:14,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42979 2024-11-19T12:49:14,935 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42979 2024-11-19T12:49:14,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42979 2024-11-19T12:49:14,936 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42979 2024-11-19T12:49:14,954 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:49:14,954 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:49:14,955 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:49:14,955 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39863 2024-11-19T12:49:14,957 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:39863 connecting to ZooKeeper ensemble=127.0.0.1:56416 2024-11-19T12:49:14,958 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,959 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:14,967 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:398630x0, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:49:14,968 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:398630x0, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:49:14,968 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:39863-0x101546dde980001 connected 2024-11-19T12:49:14,968 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:49:14,969 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:49:14,969 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:49:14,970 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:49:14,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39863 2024-11-19T12:49:14,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39863 2024-11-19T12:49:14,971 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39863 2024-11-19T12:49:14,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39863 2024-11-19T12:49:14,973 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39863 2024-11-19T12:49:14,986 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:42979 2024-11-19T12:49:14,987 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,42979,1732020554722 2024-11-19T12:49:14,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:49:14,992 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:49:14,993 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:49:15,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,001 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:49:15,002 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,42979,1732020554722 from backup master directory 2024-11-19T12:49:15,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:49:15,009 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:49:15,009 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
2024-11-19T12:49:15,009 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,014 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/hbase.id] with ID: 41f3ed59-9b13-4a8b-aa55-1484120f079b 2024-11-19T12:49:15,014 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/.tmp/hbase.id 2024-11-19T12:49:15,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:49:15,027 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:49:15,028 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/.tmp/hbase.id]:[hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/hbase.id] 2024-11-19T12:49:15,038 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:15,039 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:49:15,040 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
2024-11-19T12:49:15,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,051 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:49:15,057 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:49:15,057 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:49:15,058 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:49:15,059 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:49:15,065 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:49:15,066 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:49:15,067 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store 2024-11-19T12:49:15,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:49:15,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:49:15,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:15,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:15,474 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:15,474 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:49:15,474 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:15,474 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:49:15,474 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:49:15,474 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:15,474 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:49:15,475 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020555474Disabling compacts and flushes for region at 1732020555474Disabling writes for close at 1732020555474Writing region close event to WAL at 1732020555474Closed at 1732020555474 2024-11-19T12:49:15,476 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/.initializing 2024-11-19T12:49:15,476 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/WALs/aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C42979%2C1732020554722, suffix=, logDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/WALs/aba5a916dfea,42979,1732020554722, archiveDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/oldWALs, maxLogs=10 2024-11-19T12:49:15,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C42979%2C1732020554722.1732020555479 2024-11-19T12:49:15,484 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/WALs/aba5a916dfea,42979,1732020554722/aba5a916dfea%2C42979%2C1732020554722.1732020555479 2024-11-19T12:49:15,485 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46349:46349),(127.0.0.1/127.0.0.1:35553:35553)] 2024-11-19T12:49:15,486 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:49:15,486 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:15,486 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,486 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,488 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,489 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:49:15,489 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:15,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:15,490 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,491 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:49:15,491 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:15,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:15,492 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,493 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:49:15,493 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:15,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:15,494 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,495 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:49:15,496 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:15,496 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:15,496 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,497 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 
2024-11-19T12:49:15,497 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,498 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,498 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,499 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-19T12:49:15,500 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:49:15,502 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:49:15,503 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=755357, jitterRate=-0.03951488435268402}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:49:15,503 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020555486Initializing all the Stores at 1732020555487 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020555487Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020555487Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020555487Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020555487Cleaning up temporary data from old regions at 
1732020555498 (+11 ms)Region opened successfully at 1732020555503 (+5 ms) 2024-11-19T12:49:15,503 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:49:15,506 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@543958e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:49:15,507 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-19T12:49:15,507 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:49:15,507 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:49:15,508 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:49:15,508 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:49:15,508 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:49:15,508 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:49:15,510 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-19T12:49:15,511 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:49:15,566 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:49:15,567 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:49:15,567 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:49:15,576 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:49:15,576 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:49:15,577 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:49:15,584 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:49:15,585 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:49:15,592 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:49:15,595 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:49:15,600 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:49:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:49:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:49:15,609 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-19T12:49:15,610 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,42979,1732020554722, sessionid=0x101546dde980000, setting cluster-up flag (Was=false) 2024-11-19T12:49:15,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,626 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,651 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:49:15,652 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,667 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:15,692 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:49:15,694 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,42979,1732020554722 2024-11-19T12:49:15,695 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:49:15,697 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:49:15,698 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:49:15,698 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
2024-11-19T12:49:15,698 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,42979,1732020554722 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:49:15,700 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732020585702 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:49:15,702 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,703 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:49:15,703 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:49:15,703 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:49:15,703 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:49:15,703 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:49:15,704 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020555704,5,FailOnTimeoutGroup] 2024-11-19T12:49:15,704 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020555704,5,FailOnTimeoutGroup] 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,704 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:15,704 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
2024-11-19T12:49:15,704 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:49:15,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:49:15,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:49:15,712 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:49:15,712 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', 
BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c 2024-11-19T12:49:15,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:49:15,720 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:49:15,775 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(746): ClusterId : 41f3ed59-9b13-4a8b-aa55-1484120f079b 2024-11-19T12:49:15,775 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:49:15,792 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:49:15,792 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:49:15,801 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:49:15,802 DEBUG [RS:0;aba5a916dfea:39863 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1a848099, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:49:15,836 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:39863 2024-11-19T12:49:15,836 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:49:15,836 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:49:15,836 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(832): About to register with Master. 
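The hbase:meta descriptor printed above lists each column family's attributes (VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY, BLOCKSIZE, and so on). As a sketch of how such a family is expressed with the standard hbase-client builder API, the snippet below mirrors the 'info' family's attributes; the table name "example" and the class name are assumptions for illustration only.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeFamilyDescriptor {
  public static void main(String[] args) {
    // Mirrors the logged 'info' family: VERSIONS=3, ROWCOL bloom filter,
    // ROW_INDEX_V1 encoding, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();
    // "example" is an illustrative table name, not one from this test run.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(info)
        .build();
    System.out.println(td);
  }
}
```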
2024-11-19T12:49:15,837 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,42979,1732020554722 with port=39863, startcode=1732020554954 2024-11-19T12:49:15,837 DEBUG [RS:0;aba5a916dfea:39863 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:49:15,839 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39567, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:49:15,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42979 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,840 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42979 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,841 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c 2024-11-19T12:49:15,841 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:42859 2024-11-19T12:49:15,841 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:49:15,851 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:49:15,851 DEBUG [RS:0;aba5a916dfea:39863 {}] zookeeper.ZKUtil(111): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,851 WARN [RS:0;aba5a916dfea:39863 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:49:15,851 INFO [RS:0;aba5a916dfea:39863 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:49:15,851 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,851 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,39863,1732020554954] 2024-11-19T12:49:15,854 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:49:15,856 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:49:15,856 INFO [RS:0;aba5a916dfea:39863 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:49:15,857 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
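The lines above show the regionserver registering with the master and creating its ephemeral znode under /hbase/rs, which the master's RegionServerTracker then picks up. A small sketch of inspecting that znode path with the plain ZooKeeper client follows; the quorum address is copied from the log and the class name is an assumption.

```java
import java.util.List;
import org.apache.zookeeper.ZooKeeper;

public class ListRegionServerZNodes {
  public static void main(String[] args) throws Exception {
    // Quorum address copied from the log; adjust for a real cluster.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:56416", 30_000, event -> { });
    try {
      // Each child is an ephemeral node named host,port,startcode,
      // e.g. aba5a916dfea,39863,1732020554954 as registered above.
      List<String> servers = zk.getChildren("/hbase/rs", false);
      servers.forEach(System.out::println);
    } finally {
      zk.close();
    }
  }
}
```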
2024-11-19T12:49:15,858 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:49:15,859 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:49:15,859 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,859 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:49:15,860 DEBUG [RS:0;aba5a916dfea:39863 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
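Many of the surrounding lines are ChoreService announcements of the form "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." As a rough sketch of what such a chore looks like in code, the snippet below subclasses ScheduledChore and hands it to a ChoreService; the chore name, the 1000 ms period, and the Stoppable stub are invented for illustration and the constructor usage is a best-effort reading of the public HBase API, not this test's code.

```java
import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };
    // Name and period are illustrative assumptions.
    ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
      @Override protected void chore() {
        System.out.println("chore tick"); // periodic work goes here
      }
    };
    ChoreService service = new ChoreService("example");
    service.scheduleChore(chore); // emits a "... is enabled." line like those above
    Thread.sleep(3_000);
    service.shutdown();
  }
}
```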
2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,861 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39863,1732020554954-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:49:15,882 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:49:15,882 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,39863,1732020554954-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,883 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,883 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.Replication(171): aba5a916dfea,39863,1732020554954 started 2024-11-19T12:49:15,897 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:15,897 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,39863,1732020554954, RpcServer on aba5a916dfea/172.17.0.2:39863, sessionid=0x101546dde980001 2024-11-19T12:49:15,897 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:49:15,897 DEBUG [RS:0;aba5a916dfea:39863 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,897 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39863,1732020554954' 2024-11-19T12:49:15,897 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:49:15,897 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,39863,1732020554954 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,39863,1732020554954' 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:49:15,898 DEBUG 
[RS:0;aba5a916dfea:39863 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:49:15,898 DEBUG [RS:0;aba5a916dfea:39863 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:49:15,898 INFO [RS:0;aba5a916dfea:39863 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:49:15,899 INFO [RS:0;aba5a916dfea:39863 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:49:16,000 INFO [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39863%2C1732020554954, suffix=, logDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954, archiveDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs, maxLogs=32 2024-11-19T12:49:16,001 INFO [RS:0;aba5a916dfea:39863 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39863%2C1732020554954.1732020556001 2024-11-19T12:49:16,006 INFO [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020556001 2024-11-19T12:49:16,006 DEBUG [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35553:35553),(127.0.0.1/127.0.0.1:46349:46349)] 2024-11-19T12:49:16,120 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:16,121 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:49:16,122 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:49:16,122 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,123 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, 
cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:49:16,124 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:49:16,124 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,124 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:49:16,126 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:49:16,126 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,126 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:49:16,128 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak 
ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:49:16,128 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,128 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,128 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:49:16,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740 2024-11-19T12:49:16,129 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740 2024-11-19T12:49:16,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:49:16,131 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:49:16,131 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
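The repeated CompactionConfiguration lines above report per-store settings such as minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 and off-peak ratio 5.0. A minimal sketch of the configuration keys behind those numbers follows; the keys are the standard hbase-site.xml properties and the values simply echo the logged defaults, so this is illustrative rather than a tuning suggestion.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionConfigSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    conf.setInt("hbase.hstore.compaction.min", 3);                 // minFilesToCompact
    conf.setInt("hbase.hstore.compaction.max", 10);                // maxFilesToCompact
    conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);          // ratio
    conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);  // off-peak ratio
    System.out.println(conf.getFloat("hbase.hstore.compaction.ratio", -1f));
  }
}
```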
2024-11-19T12:49:16,133 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:49:16,135 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:49:16,136 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=709009, jitterRate=-0.0984496921300888}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:49:16,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020556120Initializing all the Stores at 1732020556120Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556121 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556121Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020556121Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556121Cleaning up temporary data from old regions at 1732020556131 (+10 ms)Region opened successfully at 1732020556137 (+6 ms) 2024-11-19T12:49:16,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:49:16,137 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:49:16,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:49:16,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:49:16,137 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:49:16,138 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:49:16,138 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020556137Disabling compacts and flushes for region at 1732020556137Disabling writes for close at 1732020556137Writing region close 
event to WAL at 1732020556138 (+1 ms)Closed at 1732020556138 2024-11-19T12:49:16,140 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:49:16,140 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:49:16,140 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:49:16,142 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:49:16,143 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:49:16,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:16,264 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:16,294 DEBUG [aba5a916dfea:42979 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:49:16,294 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:16,296 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39863,1732020554954, state=OPENING 2024-11-19T12:49:16,402 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:49:16,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:16,417 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:49:16,418 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:49:16,418 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39863,1732020554954}] 2024-11-19T12:49:16,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:49:16,418 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:49:16,572 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:49:16,574 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38649, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:49:16,578 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:49:16,578 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:49:16,579 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C39863%2C1732020554954.meta, suffix=.meta, logDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954, archiveDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs, maxLogs=32 2024-11-19T12:49:16,580 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39863%2C1732020554954.meta.1732020556580.meta 2024-11-19T12:49:16,584 INFO 
[RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.meta.1732020556580.meta 2024-11-19T12:49:16,586 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35553:35553),(127.0.0.1/127.0.0.1:46349:46349)] 2024-11-19T12:49:16,586 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:49:16,587 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:49:16,587 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:49:16,588 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:49:16,589 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:49:16,589 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,589 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,590 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:49:16,590 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:49:16,590 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,591 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:49:16,591 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:49:16,591 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,592 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,592 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:49:16,593 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:49:16,593 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,594 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:49:16,594 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:49:16,594 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740 2024-11-19T12:49:16,596 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740 2024-11-19T12:49:16,597 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:49:16,597 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:49:16,597 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
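The FlushLargeStoresPolicy message above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set in the hbase:meta table descriptor, so the policy falls back to memstore-flush-size divided by the number of families. A sketch of setting that key as a table-descriptor attribute follows; the table name, family, 4 MB value, and the assumption that setValue on the descriptor is the right place to carry this key are all illustrative.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Property key taken from the log line; everything else is an assumption.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(4L * 1024 * 1024)) // 4 MB, arbitrary illustration
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}
```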
2024-11-19T12:49:16,599 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:49:16,600 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=804170, jitterRate=0.022556230425834656}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:49:16,600 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:49:16,600 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020556587Writing region info on filesystem at 1732020556587Initializing all the Stores at 1732020556588 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556588Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556588Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020556588Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020556588Cleaning up temporary data from old regions at 1732020556597 (+9 ms)Running coprocessor post-open hooks at 1732020556600 (+3 ms)Region opened successfully at 1732020556600 2024-11-19T12:49:16,601 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020556572 2024-11-19T12:49:16,603 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:49:16,603 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:49:16,604 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:16,605 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,39863,1732020554954, state=OPEN 2024-11-19T12:49:16,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:49:16,652 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:49:16,653 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:16,653 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:49:16,653 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:49:16,655 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:49:16,656 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,39863,1732020554954 in 235 msec 2024-11-19T12:49:16,658 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:49:16,658 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 516 msec 2024-11-19T12:49:16,658 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:49:16,658 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:49:16,660 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:49:16,660 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39863,1732020554954, seqNum=-1] 2024-11-19T12:49:16,660 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:49:16,661 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42539, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:49:16,666 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 969 msec 2024-11-19T12:49:16,667 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020556667, completionTime=-1 2024-11-19T12:49:16,667 INFO 
[master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:49:16,667 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020616669 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020676669 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:42979, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,669 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,670 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,671 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.664sec 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
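Both the master and the regionserver above log "Quota support disabled", so the quota managers and the hbase:slowlog table never start in this test. A minimal sketch of the switch that would change that follows; hbase.quota.enabled is the standard property, while the class name and the idea that this alone is sufficient (it must be visible to master and regionservers before startup) are stated assumptions.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class EnableQuotaSupport {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Must be set cluster-wide (hbase-site.xml) before the master starts
    // for MasterQuotaManager / RegionServerRpcQuotaManager to activate.
    conf.setBoolean("hbase.quota.enabled", true);
    System.out.println(conf.getBoolean("hbase.quota.enabled", false));
  }
}
```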
2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:49:16,673 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:49:16,675 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:49:16,676 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:49:16,676 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@1d38de93, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:49:16,676 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,42979,1732020554722-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:49:16,676 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,42979,-1 for getting cluster id 2024-11-19T12:49:16,676 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:49:16,677 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '41f3ed59-9b13-4a8b-aa55-1484120f079b' 2024-11-19T12:49:16,677 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:49:16,677 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "41f3ed59-9b13-4a8b-aa55-1484120f079b" 2024-11-19T12:49:16,678 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@dc61e48, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:49:16,678 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,42979,-1] 2024-11-19T12:49:16,678 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:49:16,678 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:49:16,679 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60814, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:49:16,680 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@15c5bcd0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:49:16,680 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:49:16,681 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,39863,1732020554954, seqNum=-1] 2024-11-19T12:49:16,682 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:49:16,683 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38318, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:49:16,685 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,42979,1732020554722 2024-11-19T12:49:16,685 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:49:16,688 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:49:16,688 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-19T12:49:16,689 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is aba5a916dfea,42979,1732020554722 2024-11-19T12:49:16,690 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5a510e81 2024-11-19T12:49:16,690 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-19T12:49:16,691 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60826, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-19T12:49:16,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-19T12:49:16,691 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-19T12:49:16,692 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:49:16,693 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-19T12:49:16,695 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-19T12:49:16,695 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:16,695 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-19T12:49:16,696 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:49:16,696 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-19T12:49:16,703 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741835_1011 (size=381) 2024-11-19T12:49:16,704 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741835_1011 (size=381) 2024-11-19T12:49:16,705 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 13fd330ee27d6492fa0139cddb90d364, NAME => 'TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c 2024-11-19T12:49:16,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741836_1012 (size=64) 2024-11-19T12:49:16,712 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741836_1012 (size=64) 2024-11-19T12:49:16,712 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated 
TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:16,712 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 13fd330ee27d6492fa0139cddb90d364, disabling compactions & flushes 2024-11-19T12:49:16,712 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:16,713 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:16,713 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. after waiting 0 ms 2024-11-19T12:49:16,713 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:16,713 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:16,713 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 13fd330ee27d6492fa0139cddb90d364: Waiting for close lock at 1732020556712Disabling compacts and flushes for region at 1732020556712Disabling writes for close at 1732020556713 (+1 ms)Writing region close event to WAL at 1732020556713Closed at 1732020556713 2024-11-19T12:49:16,714 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-19T12:49:16,714 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732020556714"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020556714"}]},"ts":"1732020556714"} 2024-11-19T12:49:16,717 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-19T12:49:16,718 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-19T12:49:16,718 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020556718"}]},"ts":"1732020556718"} 2024-11-19T12:49:16,721 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-19T12:49:16,721 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, ASSIGN}] 2024-11-19T12:49:16,722 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, ASSIGN 2024-11-19T12:49:16,724 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, ASSIGN; state=OFFLINE, location=aba5a916dfea,39863,1732020554954; forceNewPlan=false, retain=false 2024-11-19T12:49:16,874 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=13fd330ee27d6492fa0139cddb90d364, regionState=OPENING, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:16,877 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, ASSIGN because future has completed 2024-11-19T12:49:16,878 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954}] 2024-11-19T12:49:17,040 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
2024-11-19T12:49:17,040 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 13fd330ee27d6492fa0139cddb90d364, NAME => 'TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:49:17,040 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,040 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:17,040 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,040 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,042 INFO [StoreOpener-13fd330ee27d6492fa0139cddb90d364-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,044 INFO [StoreOpener-13fd330ee27d6492fa0139cddb90d364-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 13fd330ee27d6492fa0139cddb90d364 columnFamilyName info 2024-11-19T12:49:17,044 DEBUG [StoreOpener-13fd330ee27d6492fa0139cddb90d364-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:17,044 INFO [StoreOpener-13fd330ee27d6492fa0139cddb90d364-1 {}] regionserver.HStore(327): Store=13fd330ee27d6492fa0139cddb90d364/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:17,045 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,046 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,046 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,046 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,046 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,048 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,050 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:49:17,051 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 13fd330ee27d6492fa0139cddb90d364; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726752, jitterRate=-0.07588803768157959}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:49:17,051 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:17,052 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 13fd330ee27d6492fa0139cddb90d364: Running coprocessor pre-open hook at 1732020557040Writing region info on filesystem at 1732020557040Initializing all the Stores at 1732020557041 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020557041Cleaning up temporary data from old regions at 1732020557047 (+6 ms)Running coprocessor post-open hooks at 1732020557051 (+4 ms)Region opened successfully at 1732020557052 (+1 ms) 2024-11-19T12:49:17,053 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., pid=6, masterSystemTime=1732020557031 2024-11-19T12:49:17,055 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
2024-11-19T12:49:17,055 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:17,056 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=13fd330ee27d6492fa0139cddb90d364, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:17,059 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 because future has completed 2024-11-19T12:49:17,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-19T12:49:17,066 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 in 186 msec 2024-11-19T12:49:17,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-19T12:49:17,069 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, ASSIGN in 345 msec 2024-11-19T12:49:17,070 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-19T12:49:17,070 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732020557070"}]},"ts":"1732020557070"} 2024-11-19T12:49:17,074 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-19T12:49:17,075 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-19T12:49:17,078 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 383 msec 2024-11-19T12:49:17,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:17,265 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:17,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,403 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,404 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,409 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,914 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:49:17,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,915 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,916 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,918 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,942 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:17,951 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:18,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:18,266 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:19,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:19,267 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:20,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:20,268 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:21,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:21,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:21,854 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:49:21,855 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-19T12:49:22,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:22,269 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:23,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:23,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:23,372 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-19T12:49:23,372 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-19T12:49:23,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:49:23,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-19T12:49:23,373 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-19T12:49:23,373 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-19T12:49:23,374 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-19T12:49:23,374 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-19T12:49:24,271 WARN [Close-WAL-Writer-0 {}] 
util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:24,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:25,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:25,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:26,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:26,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:26,760 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=42979 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-19T12:49:26,760 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-19T12:49:26,760 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-19T12:49:26,763 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-19T12:49:26,763 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:26,766 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2] 2024-11-19T12:49:26,781 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:26,781 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:26,802 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/bb752281479e4bd7aa9861a0054b76f8 is 1080, key is row0001/info:/1732020566767/Put/seqid=0 2024-11-19T12:49:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741837_1013 (size=12509) 2024-11-19T12:49:26,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741837_1013 (size=12509) 2024-11-19T12:49:26,842 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(5310): Region is too busy due to exceeding memstore size limit. org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) ~[classes/:?] at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) ~[hbase-common-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) ~[classes/:?] 
at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) ~[classes/:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) ~[classes/:4.0.0-alpha-1-SNAPSHOT] 2024-11-19T12:49:26,845 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] ipc.CallRunner(138): callId: 35 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38318 deadline: 1732020576841, exception=org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:26,871 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2, error=org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T12:49:26,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 is org.apache.hadoop.hbase.RegionTooBusyException: org.apache.hadoop.hbase.RegionTooBusyException: Over memstore limit=32.0 K, regionName=13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 at org.apache.hadoop.hbase.regionserver.HRegion.checkResources(HRegion.java:5308) at org.apache.hadoop.hbase.regionserver.HRegion.lambda$put$11(HRegion.java:3329) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:216) at org.apache.hadoop.hbase.regionserver.HRegion.put(HRegion.java:3322) at org.apache.hadoop.hbase.regionserver.RSRpcServices.put(RSRpcServices.java:3018) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2981) at 
org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T12:49:26,872 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(76): Will not update region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 because the exception is null or not the one we care about 2024-11-19T12:49:27,227 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/bb752281479e4bd7aa9861a0054b76f8 2024-11-19T12:49:27,245 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/bb752281479e4bd7aa9861a0054b76f8 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8 2024-11-19T12:49:27,253 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8, entries=7, sequenceid=11, filesize=12.2 K 2024-11-19T12:49:27,254 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=23.12 KB/23672 for 13fd330ee27d6492fa0139cddb90d364 in 473ms, sequenceid=11, compaction requested=false 2024-11-19T12:49:27,255 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:27,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:27,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:28,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:28,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:29,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:29,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:30,274 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:30,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:31,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:31,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:32,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:32,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:33,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:33,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:34,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:34,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:35,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:35,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:36,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:36,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:36,960 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:36,960 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=24.17 KB heapSize=26.13 KB 2024-11-19T12:49:36,965 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/78a974084a3545d39496a23ca277de78 is 1080, key is row0008/info:/1732020566782/Put/seqid=0 2024-11-19T12:49:36,970 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741838_1014 (size=29761) 2024-11-19T12:49:36,971 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741838_1014 (size=29761) 2024-11-19T12:49:36,971 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=24.17 KB at sequenceid=37 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/78a974084a3545d39496a23ca277de78 2024-11-19T12:49:36,977 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/78a974084a3545d39496a23ca277de78 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 2024-11-19T12:49:36,983 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78, entries=23, sequenceid=37, filesize=29.1 K 2024-11-19T12:49:36,984 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~24.17 KB/24748, heapSize ~26.11 KB/26736, currentSize=2.10 KB/2152 for 13fd330ee27d6492fa0139cddb90d364 in 24ms, sequenceid=37, compaction requested=false 2024-11-19T12:49:36,984 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:36,984 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=41.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:36,984 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:36,984 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 because midkey is the same as first or last row 2024-11-19T12:49:37,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for 
hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:37,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:38,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:38,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:38,978 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:38,978 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:38,983 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/f3f036e7cc41456b9002b2b41304919d is 1080, key is row0031/info:/1732020576961/Put/seqid=0 2024-11-19T12:49:38,988 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741839_1015 (size=12509) 2024-11-19T12:49:38,989 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741839_1015 (size=12509) 2024-11-19T12:49:38,989 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=47 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/f3f036e7cc41456b9002b2b41304919d 2024-11-19T12:49:38,996 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/f3f036e7cc41456b9002b2b41304919d as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d 2024-11-19T12:49:39,003 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d, entries=7, sequenceid=47, filesize=12.2 K 2024-11-19T12:49:39,004 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 13fd330ee27d6492fa0139cddb90d364 in 26ms, sequenceid=47, compaction requested=true 2024-11-19T12:49:39,004 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:39,004 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=53.5 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,004 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,004 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 because midkey is the same as first or last row 2024-11-19T12:49:39,004 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd330ee27d6492fa0139cddb90d364:info, priority=-2147483648, current under compaction store 
size is 1 2024-11-19T12:49:39,005 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:39,005 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:39,005 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:39,005 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T12:49:39,006 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 54779 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:39,006 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): 13fd330ee27d6492fa0139cddb90d364/info is initiating minor compaction (all files) 2024-11-19T12:49:39,006 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 13fd330ee27d6492fa0139cddb90d364/info in TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:39,006 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp, totalSize=53.5 K 2024-11-19T12:49:39,007 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting bb752281479e4bd7aa9861a0054b76f8, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732020566767 2024-11-19T12:49:39,007 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 78a974084a3545d39496a23ca277de78, keycount=23, bloomtype=ROW, size=29.1 K, encoding=NONE, compression=NONE, seqNum=37, earliestPutTs=1732020566782 2024-11-19T12:49:39,007 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting f3f036e7cc41456b9002b2b41304919d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732020576961 2024-11-19T12:49:39,009 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/01170fec64354547885d495f0601f62b is 1080, key is row0038/info:/1732020578980/Put/seqid=0 
2024-11-19T12:49:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741840_1016 (size=16817) 2024-11-19T12:49:39,014 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741840_1016 (size=16817) 2024-11-19T12:49:39,016 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=61 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/01170fec64354547885d495f0601f62b 2024-11-19T12:49:39,023 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd330ee27d6492fa0139cddb90d364#info#compaction#59 average throughput is 18.98 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:39,024 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/fb4ba02050164c05baec324687410927 is 1080, key is row0001/info:/1732020566767/Put/seqid=0 2024-11-19T12:49:39,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/01170fec64354547885d495f0601f62b as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b 2024-11-19T12:49:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741841_1017 (size=44978) 2024-11-19T12:49:39,030 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741841_1017 (size=44978) 2024-11-19T12:49:39,032 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b, entries=11, sequenceid=61, filesize=16.4 K 2024-11-19T12:49:39,033 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=11.56 KB/11836 for 13fd330ee27d6492fa0139cddb90d364 in 28ms, sequenceid=61, compaction requested=false 2024-11-19T12:49:39,033 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:39,033 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=69.9 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,033 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,034 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 because midkey is the same as first or last row 2024-11-19T12:49:39,034 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:39,034 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T12:49:39,039 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/fb4ba02050164c05baec324687410927 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 2024-11-19T12:49:39,041 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/7e04d3dc826d4cfb97f86e280869595b is 1080, key is row0049/info:/1732020579006/Put/seqid=0 2024-11-19T12:49:39,047 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 13fd330ee27d6492fa0139cddb90d364/info of 13fd330ee27d6492fa0139cddb90d364 into fb4ba02050164c05baec324687410927(size=43.9 K), total size for store is 60.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:39,047 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., storeName=13fd330ee27d6492fa0139cddb90d364/info, priority=13, startTime=1732020579004; duration=0sec 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 because midkey is the same as first or last row 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 because midkey is the same as first or last row 2024-11-19T12:49:39,047 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=60.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,048 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,048 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 because midkey is the same as first or last row 2024-11-19T12:49:39,048 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:39,048 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd330ee27d6492fa0139cddb90d364:info 2024-11-19T12:49:39,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741842_1018 (size=17894) 2024-11-19T12:49:39,056 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741842_1018 (size=17894) 2024-11-19T12:49:39,056 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=76 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/7e04d3dc826d4cfb97f86e280869595b 2024-11-19T12:49:39,065 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/7e04d3dc826d4cfb97f86e280869595b as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b 2024-11-19T12:49:39,070 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b, entries=12, sequenceid=76, filesize=17.5 K 2024-11-19T12:49:39,071 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 13fd330ee27d6492fa0139cddb90d364 in 37ms, sequenceid=76, compaction requested=true 2024-11-19T12:49:39,071 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:39,072 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=77.8 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,072 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,072 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 because midkey is the same as first or last row 2024-11-19T12:49:39,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd330ee27d6492fa0139cddb90d364:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:39,072 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:39,072 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:39,073 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 79689 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:39,073 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): 13fd330ee27d6492fa0139cddb90d364/info is initiating minor compaction (all files) 2024-11-19T12:49:39,073 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 13fd330ee27d6492fa0139cddb90d364/info in TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
2024-11-19T12:49:39,073 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp, totalSize=77.8 K 2024-11-19T12:49:39,074 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting fb4ba02050164c05baec324687410927, keycount=37, bloomtype=ROW, size=43.9 K, encoding=NONE, compression=NONE, seqNum=47, earliestPutTs=1732020566767 2024-11-19T12:49:39,074 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 01170fec64354547885d495f0601f62b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=61, earliestPutTs=1732020578980 2024-11-19T12:49:39,075 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7e04d3dc826d4cfb97f86e280869595b, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732020579006 2024-11-19T12:49:39,086 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd330ee27d6492fa0139cddb90d364#info#compaction#61 average throughput is 30.78 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:39,086 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/03f52cc4a49a4cd9a4470fc3beefeef4 is 1080, key is row0001/info:/1732020566767/Put/seqid=0 2024-11-19T12:49:39,090 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741843_1019 (size=69920) 2024-11-19T12:49:39,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741843_1019 (size=69920) 2024-11-19T12:49:39,097 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/03f52cc4a49a4cd9a4470fc3beefeef4 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 2024-11-19T12:49:39,104 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 13fd330ee27d6492fa0139cddb90d364/info of 13fd330ee27d6492fa0139cddb90d364 into 03f52cc4a49a4cd9a4470fc3beefeef4(size=68.3 K), total size for store is 68.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:39,104 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:39,104 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., storeName=13fd330ee27d6492fa0139cddb90d364/info, priority=13, startTime=1732020579072; duration=0sec 2024-11-19T12:49:39,104 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,104 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,104 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=68.3 K, sizeToCheck=16.0 K 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:39,105 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd330ee27d6492fa0139cddb90d364:info 2024-11-19T12:49:39,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:39,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:40,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:40,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:41,055 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,055 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:41,060 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/6cc106e9b0874cc7b9de16ebffc55869 is 1080, key is row0061/info:/1732020579035/Put/seqid=0 2024-11-19T12:49:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741844_1020 (size=12509) 2024-11-19T12:49:41,073 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741844_1020 (size=12509) 2024-11-19T12:49:41,073 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=88 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/6cc106e9b0874cc7b9de16ebffc55869 2024-11-19T12:49:41,080 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/6cc106e9b0874cc7b9de16ebffc55869 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869 2024-11-19T12:49:41,086 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869, entries=7, sequenceid=88, filesize=12.2 K 2024-11-19T12:49:41,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 13fd330ee27d6492fa0139cddb90d364 in 31ms, sequenceid=88, compaction requested=false 2024-11-19T12:49:41,087 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:41,087 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=80.5 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,087 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,087 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:41,087 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,087 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-19T12:49:41,091 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/83a988d2e28c4a438dd8dd85cac0404a is 1080, key is row0068/info:/1732020581056/Put/seqid=0 2024-11-19T12:49:41,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741845_1021 (size=22222) 2024-11-19T12:49:41,096 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741845_1021 (size=22222) 2024-11-19T12:49:41,097 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=107 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/83a988d2e28c4a438dd8dd85cac0404a 2024-11-19T12:49:41,103 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/83a988d2e28c4a438dd8dd85cac0404a as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a 2024-11-19T12:49:41,109 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a, entries=16, sequenceid=107, filesize=21.7 K 2024-11-19T12:49:41,110 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=10.51 KB/10760 for 13fd330ee27d6492fa0139cddb90d364 in 23ms, sequenceid=107, compaction requested=true 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=102.2 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 13fd330ee27d6492fa0139cddb90d364:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:41,111 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:41,111 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:41,111 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,111 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T12:49:41,112 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104651 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:41,112 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): 13fd330ee27d6492fa0139cddb90d364/info is initiating minor compaction (all files) 2024-11-19T12:49:41,112 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 13fd330ee27d6492fa0139cddb90d364/info in TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
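The compaction and split-policy entries around this point keep repeating two checks: the region becomes a split candidate once the summed store file size exceeds sizeToCheck (e.g. 102.2 K vs. 16.0 K), but no split point can be chosen while the midkey of the largest store file equals its first or last row. Below is a minimal, self-contained Java sketch of that decision logic only; it is not the HBase implementation, and the class, record, and method names are invented for illustration.

import java.util.Arrays;
import java.util.List;

public class SplitDecisionSketch {

    // A store file reduced to the properties the two checks need.
    record StoreFile(long sizeBytes, String firstKey, String midKey, String lastKey) {}

    // True once the summed store size crosses the threshold (e.g. sumSize=102.2 K > sizeToCheck=16.0 K).
    static boolean shouldSplit(List<StoreFile> files, long sizeToCheckBytes) {
        long sumSize = files.stream().mapToLong(StoreFile::sizeBytes).sum();
        return sumSize > sizeToCheckBytes;
    }

    // Split point is the midkey of the largest file, unless it equals the first or last row,
    // which matches the "cannot split ... because midkey is the same as first or last row" lines.
    static String chooseSplitPoint(List<StoreFile> files) {
        StoreFile largest = files.stream()
                .max((a, b) -> Long.compare(a.sizeBytes(), b.sizeBytes()))
                .orElseThrow();
        if (largest.midKey().equals(largest.firstKey()) || largest.midKey().equals(largest.lastKey())) {
            return null; // region is big enough, but there is no usable split point yet
        }
        return largest.midKey();
    }

    public static void main(String[] args) {
        List<StoreFile> files = Arrays.asList(
                new StoreFile(69_920, "row0001", "row0001", "row0060"),  // midkey == first row
                new StoreFile(12_509, "row0061", "row0064", "row0067"),
                new StoreFile(22_222, "row0068", "row0075", "row0083"));
        System.out.println("should split: " + shouldSplit(files, 16_384)); // true (104651 > 16384)
        System.out.println("split point : " + chooseSplitPoint(files));    // null -> cannot split
    }
}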
2024-11-19T12:49:41,112 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp, totalSize=102.2 K 2024-11-19T12:49:41,113 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 03f52cc4a49a4cd9a4470fc3beefeef4, keycount=60, bloomtype=ROW, size=68.3 K, encoding=NONE, compression=NONE, seqNum=76, earliestPutTs=1732020566767 2024-11-19T12:49:41,113 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6cc106e9b0874cc7b9de16ebffc55869, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=88, earliestPutTs=1732020579035 2024-11-19T12:49:41,114 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 83a988d2e28c4a438dd8dd85cac0404a, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1732020581056 2024-11-19T12:49:41,116 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/477d36dd487046fcad09438d0573e451 is 1080, key is row0084/info:/1732020581088/Put/seqid=0 2024-11-19T12:49:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741846_1022 (size=16817) 2024-11-19T12:49:41,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741846_1022 (size=16817) 2024-11-19T12:49:41,125 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/477d36dd487046fcad09438d0573e451 2024-11-19T12:49:41,128 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 13fd330ee27d6492fa0139cddb90d364#info#compaction#65 average throughput is 21.29 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:41,129 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/dc270dd4ca24440c8b972df9f1983d65 is 1080, key is row0001/info:/1732020566767/Put/seqid=0 2024-11-19T12:49:41,131 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/477d36dd487046fcad09438d0573e451 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/477d36dd487046fcad09438d0573e451 2024-11-19T12:49:41,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741847_1023 (size=94870) 2024-11-19T12:49:41,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741847_1023 (size=94870) 2024-11-19T12:49:41,137 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/477d36dd487046fcad09438d0573e451, entries=11, sequenceid=121, filesize=16.4 K 2024-11-19T12:49:41,138 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=2.10 KB/2152 for 13fd330ee27d6492fa0139cddb90d364 in 27ms, sequenceid=121, compaction requested=false 2024-11-19T12:49:41,138 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:41,138 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=118.6 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,138 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,138 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 because midkey is the same as first or last row 2024-11-19T12:49:41,140 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/dc270dd4ca24440c8b972df9f1983d65 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65 2024-11-19T12:49:41,147 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 13fd330ee27d6492fa0139cddb90d364/info of 13fd330ee27d6492fa0139cddb90d364 into dc270dd4ca24440c8b972df9f1983d65(size=92.6 K), total size for store is 109.1 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 13fd330ee27d6492fa0139cddb90d364: 2024-11-19T12:49:41,147 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., storeName=13fd330ee27d6492fa0139cddb90d364/info, priority=13, startTime=1732020581111; duration=0sec 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=109.1 K, sizeToCheck=16.0 K 2024-11-19T12:49:41,147 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-19T12:49:41,148 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:41,148 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:41,148 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 13fd330ee27d6492fa0139cddb90d364:info 2024-11-19T12:49:41,150 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42979 {}] assignment.AssignmentManager(1363): Split request from aba5a916dfea,39863,1732020554954, parent={ENCODED => 13fd330ee27d6492fa0139cddb90d364, NAME => 'TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-19T12:49:41,156 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42979 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:41,160 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42979 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=13fd330ee27d6492fa0139cddb90d364, daughterA=9097f41c01b839a6e252df28290af8d9, daughterB=c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:41,162 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=13fd330ee27d6492fa0139cddb90d364, daughterA=9097f41c01b839a6e252df28290af8d9, daughterB=c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:41,162 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=13fd330ee27d6492fa0139cddb90d364, daughterA=9097f41c01b839a6e252df28290af8d9, daughterB=c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:41,162 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=13fd330ee27d6492fa0139cddb90d364, daughterA=9097f41c01b839a6e252df28290af8d9, daughterB=c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:41,169 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, UNASSIGN}] 2024-11-19T12:49:41,171 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, UNASSIGN 2024-11-19T12:49:41,173 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=13fd330ee27d6492fa0139cddb90d364, regionState=CLOSING, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:41,175 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, UNASSIGN because future has completed 2024-11-19T12:49:41,176 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-19T12:49:41,176 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954}] 2024-11-19T12:49:41,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:41,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:41,334 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,334 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-19T12:49:41,335 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 13fd330ee27d6492fa0139cddb90d364, disabling compactions & flushes 2024-11-19T12:49:41,335 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:41,335 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:41,335 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. after waiting 0 ms 2024-11-19T12:49:41,335 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
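The repeated Close-WAL-Writer WARN entries above report java.lang.reflect.InvocationTargetException with "Filesystem closed" as the cause because RecoverLeaseFSUtils invokes isFileClosed reflectively (visible in the GeneratedMethodAccessor frames), and reflection wraps whatever the target method throws. A minimal sketch of that wrapping behaviour, using an invented stand-in class rather than the real HDFS client:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

public class ReflectiveWrapSketch {

    // Stand-in for a DFS client whose underlying filesystem has already been shut down.
    public static class ClosedFs {
        public boolean isFileClosed(String path) throws IOException {
            throw new IOException("Filesystem closed");
        }
    }

    public static void main(String[] args) throws Exception {
        Method m = ClosedFs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(new ClosedFs(), "hdfs://localhost/example.wal");
        } catch (InvocationTargetException e) {
            // Reflection reports the wrapper; the interesting failure is always in the cause.
            System.out.println("wrapper: " + e);            // java.lang.reflect.InvocationTargetException
            System.out.println("cause  : " + e.getCause()); // java.io.IOException: Filesystem closed
        }
    }
}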
2024-11-19T12:49:41,335 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 13fd330ee27d6492fa0139cddb90d364 1/1 column families, dataSize=2.10 KB heapSize=2.50 KB 2024-11-19T12:49:41,340 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/e940277152d14f4a83d43e7ee3baac6d is 1080, key is row0095/info:/1732020581113/Put/seqid=0 2024-11-19T12:49:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741848_1024 (size=7112) 2024-11-19T12:49:41,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741848_1024 (size=7112) 2024-11-19T12:49:41,345 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=2.10 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/e940277152d14f4a83d43e7ee3baac6d 2024-11-19T12:49:41,351 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/.tmp/info/e940277152d14f4a83d43e7ee3baac6d as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/e940277152d14f4a83d43e7ee3baac6d 2024-11-19T12:49:41,357 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/e940277152d14f4a83d43e7ee3baac6d, entries=2, sequenceid=127, filesize=6.9 K 2024-11-19T12:49:41,358 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 13fd330ee27d6492fa0139cddb90d364 in 23ms, sequenceid=127, compaction requested=true 2024-11-19T12:49:41,359 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927, 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a] to archive 2024-11-19T12:49:41,360 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:49:41,362 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/bb752281479e4bd7aa9861a0054b76f8 2024-11-19T12:49:41,363 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/78a974084a3545d39496a23ca277de78 2024-11-19T12:49:41,364 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/fb4ba02050164c05baec324687410927 2024-11-19T12:49:41,365 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d to 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/f3f036e7cc41456b9002b2b41304919d 2024-11-19T12:49:41,367 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/01170fec64354547885d495f0601f62b 2024-11-19T12:49:41,368 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/03f52cc4a49a4cd9a4470fc3beefeef4 2024-11-19T12:49:41,369 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/7e04d3dc826d4cfb97f86e280869595b 2024-11-19T12:49:41,370 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/6cc106e9b0874cc7b9de16ebffc55869 2024-11-19T12:49:41,372 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/83a988d2e28c4a438dd8dd85cac0404a 2024-11-19T12:49:41,378 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 
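The StoreCloser entries above show that compacted store files are archived rather than deleted: each HFile is moved from its location under .../data/default/<table>/<region>/<family>/ to the mirrored path under .../archive/. A rough sketch of that move, assuming a generic Hadoop FileSystem and an invented helper name (this is not the HFileArchiver code):

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ArchiveSketch {

    // Move a store file to the mirrored location under <rootDir>/archive/.
    static Path archiveStoreFile(FileSystem fs, Path rootDir, Path storeFile) throws IOException {
        String relative = storeFile.toUri().getPath()
                .substring(rootDir.toUri().getPath().length() + 1);
        Path archived = new Path(new Path(rootDir, "archive"), relative);
        fs.mkdirs(archived.getParent());
        if (!fs.rename(storeFile, archived)) {
            throw new IOException("Failed to archive " + storeFile + " to " + archived);
        }
        return archived;
    }

    public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.get(new Configuration()); // local filesystem in a plain JVM
        Path root = new Path("/tmp/archive-sketch");
        Path storeFile = new Path(root, "data/default/ExampleTable/region1/info/hfile1");
        fs.mkdirs(storeFile.getParent());
        fs.createNewFile(storeFile);
        System.out.println("archived to: " + archiveStoreFile(fs, root, storeFile));
    }
}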
2024-11-19T12:49:41,379 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 2024-11-19T12:49:41,379 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 13fd330ee27d6492fa0139cddb90d364: Waiting for close lock at 1732020581335Running coprocessor pre-close hooks at 1732020581335Disabling compacts and flushes for region at 1732020581335Disabling writes for close at 1732020581335Obtaining lock to block concurrent updates at 1732020581335Preparing flush snapshotting stores in 13fd330ee27d6492fa0139cddb90d364 at 1732020581335Finished memstore snapshotting TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., syncing WAL and waiting on mvcc, flushsize=dataSize=2152, getHeapSize=2544, getOffHeapSize=0, getCellsCount=2 at 1732020581336 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. at 1732020581336Flushing 13fd330ee27d6492fa0139cddb90d364/info: creating writer at 1732020581336Flushing 13fd330ee27d6492fa0139cddb90d364/info: appending metadata at 1732020581339 (+3 ms)Flushing 13fd330ee27d6492fa0139cddb90d364/info: closing flushed file at 1732020581339Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7ee94edf: reopening flushed file at 1732020581351 (+12 ms)Finished flush of dataSize ~2.10 KB/2152, heapSize ~2.48 KB/2544, currentSize=0 B/0 for 13fd330ee27d6492fa0139cddb90d364 in 23ms, sequenceid=127, compaction requested=true at 1732020581358 (+7 ms)Writing region close event to WAL at 1732020581374 (+16 ms)Running coprocessor post-close hooks at 1732020581379 (+5 ms)Closed at 1732020581379 2024-11-19T12:49:41,381 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,381 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=13fd330ee27d6492fa0139cddb90d364, regionState=CLOSED 2024-11-19T12:49:41,383 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 because future has completed 2024-11-19T12:49:41,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-19T12:49:41,387 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 13fd330ee27d6492fa0139cddb90d364, server=aba5a916dfea,39863,1732020554954 in 209 msec 2024-11-19T12:49:41,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-19T12:49:41,389 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=13fd330ee27d6492fa0139cddb90d364, UNASSIGN in 218 msec 2024-11-19T12:49:41,398 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:41,401 INFO [PEWorker-4 {}] 
assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=13fd330ee27d6492fa0139cddb90d364, threads=3 2024-11-19T12:49:41,403 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/477d36dd487046fcad09438d0573e451 for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,403 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/e940277152d14f4a83d43e7ee3baac6d for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,403 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65 for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,413 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/e940277152d14f4a83d43e7ee3baac6d, top=true 2024-11-19T12:49:41,421 DEBUG [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/477d36dd487046fcad09438d0573e451, top=true 2024-11-19T12:49:41,427 INFO [StoreFileSplitter-pool-0 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451 for child: c1f15182103f19dfacb6bc5f9facbedf, parent: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,427 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/477d36dd487046fcad09438d0573e451 for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,427 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d for child: c1f15182103f19dfacb6bc5f9facbedf, parent: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,427 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/e940277152d14f4a83d43e7ee3baac6d for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741849_1025 (size=27) 2024-11-19T12:49:41,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741849_1025 (size=27) 2024-11-19T12:49:41,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741850_1026 (size=27) 2024-11-19T12:49:41,443 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741850_1026 (size=27) 2024-11-19T12:49:41,444 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65 for region: 13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:49:41,446 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 13fd330ee27d6492fa0139cddb90d364 Daughter A: [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364] storefiles, Daughter B: [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d] storefiles. 
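The SplitTableRegionProcedure entries above also show that the daughter regions are created without copying data: the split parent hfile is exposed through a reference file named "<hfile>.<parentEncodedRegion>", while files that lie wholly above the split key are exposed to the upper daughter through HFileLink names of the form "<table>=<parentEncodedRegion>-<hfile>" (the top=true entries). A tiny sketch that just reproduces those two naming patterns as observed in the log, with invented helper names:

public class SplitNamingSketch {

    // Reference file in a daughter region pointing at half of a parent hfile.
    static String referenceFileName(String hfile, String parentEncodedRegion) {
        return hfile + "." + parentEncodedRegion;
    }

    // HFileLink in a daughter region pointing at a whole parent hfile.
    static String hfileLinkName(String table, String parentEncodedRegion, String hfile) {
        return table + "=" + parentEncodedRegion + "-" + hfile;
    }

    public static void main(String[] args) {
        String parent = "13fd330ee27d6492fa0139cddb90d364";
        String table = "TestLogRolling-testLogRolling";
        // Matches Daughter A's ".../info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364"
        System.out.println(referenceFileName("dc270dd4ca24440c8b972df9f1983d65", parent));
        // Matches Daughter B's HFileLink for the 477d36dd... store file
        System.out.println(hfileLinkName(table, parent, "477d36dd487046fcad09438d0573e451"));
    }
}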
2024-11-19T12:49:41,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741851_1027 (size=71) 2024-11-19T12:49:41,458 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741851_1027 (size=71) 2024-11-19T12:49:41,461 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:41,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741852_1028 (size=71) 2024-11-19T12:49:41,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741852_1028 (size=71) 2024-11-19T12:49:41,876 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:41,888 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-19T12:49:41,891 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-19T12:49:41,894 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732020581894"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732020581894"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732020581894"}]},"ts":"1732020581894"} 2024-11-19T12:49:41,895 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732020581894"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020581894"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732020581894"}]},"ts":"1732020581894"} 2024-11-19T12:49:41,895 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732020581894"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732020581894"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732020581894"}]},"ts":"1732020581894"} 2024-11-19T12:49:41,912 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9097f41c01b839a6e252df28290af8d9, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, 
region=c1f15182103f19dfacb6bc5f9facbedf, ASSIGN}] 2024-11-19T12:49:41,914 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1f15182103f19dfacb6bc5f9facbedf, ASSIGN 2024-11-19T12:49:41,914 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9097f41c01b839a6e252df28290af8d9, ASSIGN 2024-11-19T12:49:41,915 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1f15182103f19dfacb6bc5f9facbedf, ASSIGN; state=SPLITTING_NEW, location=aba5a916dfea,39863,1732020554954; forceNewPlan=false, retain=false 2024-11-19T12:49:41,915 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9097f41c01b839a6e252df28290af8d9, ASSIGN; state=SPLITTING_NEW, location=aba5a916dfea,39863,1732020554954; forceNewPlan=false, retain=false 2024-11-19T12:49:42,065 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c1f15182103f19dfacb6bc5f9facbedf, regionState=OPENING, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:42,065 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9097f41c01b839a6e252df28290af8d9, regionState=OPENING, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:42,069 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9097f41c01b839a6e252df28290af8d9, ASSIGN because future has completed 2024-11-19T12:49:42,070 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9097f41c01b839a6e252df28290af8d9, server=aba5a916dfea,39863,1732020554954}] 2024-11-19T12:49:42,070 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1f15182103f19dfacb6bc5f9facbedf, ASSIGN because future has completed 2024-11-19T12:49:42,072 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1f15182103f19dfacb6bc5f9facbedf, server=aba5a916dfea,39863,1732020554954}] 2024-11-19T12:49:42,227 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 
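The pid/ppid bookkeeping in the surrounding entries follows the usual parent/child pattern: the split procedure (pid=7) spawns UNASSIGN and ASSIGN subprocedures, which in turn spawn Close/OpenRegionProcedure children, and a parent only resumes once the executor reports "Finished subprocedure ..., resume processing ppid=...". A toy sketch of that bookkeeping, unrelated to the real ProcedureV2 framework:

import java.util.ArrayList;
import java.util.List;

public class ProcedureTreeSketch {

    static int nextPid = 7; // start at 7 only to echo the pids seen in the surrounding log

    static class Proc {
        final int pid = nextPid++;
        final Integer ppid;
        final String name;
        final List<Proc> children = new ArrayList<>();
        Proc(Integer ppid, String name) { this.ppid = ppid; this.name = name; }
        Proc spawn(String childName) {
            Proc child = new Proc(this.pid, childName);
            children.add(child);
            return child;
        }
    }

    // Depth-first execution: a parent finishes only after every child has finished.
    static void run(Proc p) {
        System.out.printf("start  pid=%d ppid=%s %s%n", p.pid, p.ppid, p.name);
        for (Proc child : p.children) {
            run(child);
            System.out.printf("finished subprocedure pid=%d, resume processing ppid=%d%n", child.pid, p.pid);
        }
        System.out.printf("finish pid=%d %s%n", p.pid, p.name);
    }

    public static void main(String[] args) {
        Proc split = new Proc(null, "SplitTableRegionProcedure");                    // pid=7
        Proc unassign = split.spawn("TransitRegionStateProcedure UNASSIGN parent");  // pid=8
        unassign.spawn("CloseRegionProcedure parent");                               // pid=9
        Proc assignA = split.spawn("TransitRegionStateProcedure ASSIGN daughterA");  // pid=10
        Proc assignB = split.spawn("TransitRegionStateProcedure ASSIGN daughterB");  // pid=11
        assignA.spawn("OpenRegionProcedure daughterA");                              // pid=12
        assignB.spawn("OpenRegionProcedure daughterB");                              // pid=13
        run(split);
    }
}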
2024-11-19T12:49:42,227 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 9097f41c01b839a6e252df28290af8d9, NAME => 'TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-19T12:49:42,228 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,228 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:42,228 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,228 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,229 INFO [StoreOpener-9097f41c01b839a6e252df28290af8d9-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,230 INFO [StoreOpener-9097f41c01b839a6e252df28290af8d9-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 9097f41c01b839a6e252df28290af8d9 columnFamilyName info 2024-11-19T12:49:42,230 DEBUG [StoreOpener-9097f41c01b839a6e252df28290af8d9-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:42,245 DEBUG [StoreOpener-9097f41c01b839a6e252df28290af8d9-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-bottom 2024-11-19T12:49:42,245 INFO [StoreOpener-9097f41c01b839a6e252df28290af8d9-1 {}] regionserver.HStore(327): Store=9097f41c01b839a6e252df28290af8d9/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:42,245 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,246 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,247 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,247 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,247 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,249 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,250 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 9097f41c01b839a6e252df28290af8d9; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=745884, jitterRate=-0.05155999958515167}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:49:42,250 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:49:42,250 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 9097f41c01b839a6e252df28290af8d9: Running coprocessor pre-open hook at 1732020582228Writing region info on filesystem at 1732020582228Initializing all the Stores at 1732020582229 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020582229Cleaning up temporary data from old regions at 1732020582247 (+18 ms)Running coprocessor post-open hooks at 1732020582250 (+3 ms)Region opened successfully at 1732020582250 2024-11-19T12:49:42,251 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9., pid=12, masterSystemTime=1732020582223 2024-11-19T12:49:42,251 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 
9097f41c01b839a6e252df28290af8d9:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:42,251 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:42,251 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-19T12:49:42,252 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 2024-11-19T12:49:42,252 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): 9097f41c01b839a6e252df28290af8d9/info is initiating minor compaction (all files) 2024-11-19T12:49:42,252 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 9097f41c01b839a6e252df28290af8d9/info in TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 2024-11-19T12:49:42,252 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-bottom] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/.tmp, totalSize=92.6 K 2024-11-19T12:49:42,253 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364, keycount=41, bloomtype=ROW, size=92.6 K, encoding=NONE, compression=NONE, seqNum=107, earliestPutTs=1732020566767 2024-11-19T12:49:42,254 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 2024-11-19T12:49:42,254 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 2024-11-19T12:49:42,254 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
2024-11-19T12:49:42,254 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => c1f15182103f19dfacb6bc5f9facbedf, NAME => 'TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-19T12:49:42,254 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,254 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=9097f41c01b839a6e252df28290af8d9, regionState=OPEN, openSeqNum=131, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:42,254 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:49:42,255 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,255 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,256 INFO [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,257 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-19T12:49:42,257 INFO [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region c1f15182103f19dfacb6bc5f9facbedf columnFamilyName info 2024-11-19T12:49:42,257 DEBUG [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:49:42,257 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 
2024-11-19T12:49:42,257 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.11 KB heapSize=8.96 KB 2024-11-19T12:49:42,257 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 9097f41c01b839a6e252df28290af8d9, server=aba5a916dfea,39863,1732020554954 because future has completed 2024-11-19T12:49:42,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=10 2024-11-19T12:49:42,262 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 9097f41c01b839a6e252df28290af8d9, server=aba5a916dfea,39863,1732020554954 in 189 msec 2024-11-19T12:49:42,263 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=9097f41c01b839a6e252df28290af8d9, ASSIGN in 350 msec 2024-11-19T12:49:42,272 DEBUG [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451 2024-11-19T12:49:42,274 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 9097f41c01b839a6e252df28290af8d9#info#compaction#67 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:42,275 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/.tmp/info/bf4a989fa5f046bf8a5a0733b4ba59ef is 1080, key is row0001/info:/1732020566767/Put/seqid=0 2024-11-19T12:49:42,277 DEBUG [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d 2024-11-19T12:49:42,278 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/6181f4934b3846879c8f59f1a4b64b31 is 193, key is TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf./info:regioninfo/1732020582065/Put/seqid=0 2024-11-19T12:49:42,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:42,283 DEBUG [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-top 2024-11-19T12:49:42,283 INFO [StoreOpener-c1f15182103f19dfacb6bc5f9facbedf-1 {}] regionserver.HStore(327): Store=c1f15182103f19dfacb6bc5f9facbedf/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:49:42,283 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:42,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741853_1029 (size=70862) 2024-11-19T12:49:42,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741853_1029 (size=70862) 2024-11-19T12:49:42,285 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,286 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,286 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,286 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,288 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,289 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened c1f15182103f19dfacb6bc5f9facbedf; next sequenceid=131; 
SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=821170, jitterRate=0.044172242283821106}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-19T12:49:42,289 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:42,290 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for c1f15182103f19dfacb6bc5f9facbedf: Running coprocessor pre-open hook at 1732020582255Writing region info on filesystem at 1732020582255Initializing all the Stores at 1732020582255Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020582255Cleaning up temporary data from old regions at 1732020582286 (+31 ms)Running coprocessor post-open hooks at 1732020582289 (+3 ms)Region opened successfully at 1732020582290 (+1 ms) 2024-11-19T12:49:42,290 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., pid=13, masterSystemTime=1732020582223 2024-11-19T12:49:42,291 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 2 2024-11-19T12:49:42,291 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:42,291 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:42,291 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/.tmp/info/bf4a989fa5f046bf8a5a0733b4ba59ef as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/bf4a989fa5f046bf8a5a0733b4ba59ef 2024-11-19T12:49:42,292 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
2024-11-19T12:49:42,292 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:42,292 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:49:42,293 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-top, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=116.0 K 2024-11-19T12:49:42,293 DEBUG [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:49:42,293 INFO [RS_OPEN_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
2024-11-19T12:49:42,293 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364, keycount=41, bloomtype=ROW, size=92.6 K, encoding=NONE, compression=NONE, seqNum=108, earliestPutTs=1732020566767 2024-11-19T12:49:42,294 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=121, earliestPutTs=1732020581088 2024-11-19T12:49:42,294 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=c1f15182103f19dfacb6bc5f9facbedf, regionState=OPEN, openSeqNum=131, regionLocation=aba5a916dfea,39863,1732020554954 2024-11-19T12:49:42,294 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d, keycount=2, bloomtype=ROW, size=6.9 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732020581113 2024-11-19T12:49:42,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741854_1030 (size=9847) 2024-11-19T12:49:42,296 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741854_1030 (size=9847) 2024-11-19T12:49:42,297 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure c1f15182103f19dfacb6bc5f9facbedf, server=aba5a916dfea,39863,1732020554954 because future has completed 2024-11-19T12:49:42,297 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.92 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/6181f4934b3846879c8f59f1a4b64b31 2024-11-19T12:49:42,299 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 9097f41c01b839a6e252df28290af8d9/info of 9097f41c01b839a6e252df28290af8d9 into bf4a989fa5f046bf8a5a0733b4ba59ef(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:49:42,299 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 9097f41c01b839a6e252df28290af8d9: 2024-11-19T12:49:42,300 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9., storeName=9097f41c01b839a6e252df28290af8d9/info, priority=15, startTime=1732020582251; duration=0sec 2024-11-19T12:49:42,300 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:42,300 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 9097f41c01b839a6e252df28290af8d9:info 2024-11-19T12:49:42,301 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=11 2024-11-19T12:49:42,301 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure c1f15182103f19dfacb6bc5f9facbedf, server=aba5a916dfea,39863,1732020554954 in 227 msec 2024-11-19T12:49:42,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=11, resume processing ppid=7 2024-11-19T12:49:42,318 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=c1f15182103f19dfacb6bc5f9facbedf, ASSIGN in 389 msec 2024-11-19T12:49:42,321 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=13fd330ee27d6492fa0139cddb90d364, daughterA=9097f41c01b839a6e252df28290af8d9, daughterB=c1f15182103f19dfacb6bc5f9facbedf in 1.1620 sec 2024-11-19T12:49:42,336 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/ns/c1ce8225565c433da8a1c19aa4a97006 is 43, key is default/ns:d/1732020556662/Put/seqid=0 2024-11-19T12:49:42,339 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#70 average throughput is 17.96 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:42,339 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/250ece2a8d1f4803969a50a95f4091fd is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:42,344 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741855_1031 (size=5153) 2024-11-19T12:49:42,345 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741855_1031 (size=5153) 2024-11-19T12:49:42,345 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/ns/c1ce8225565c433da8a1c19aa4a97006 2024-11-19T12:49:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741856_1032 (size=42984) 2024-11-19T12:49:42,356 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741856_1032 (size=42984) 2024-11-19T12:49:42,363 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/250ece2a8d1f4803969a50a95f4091fd as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/250ece2a8d1f4803969a50a95f4091fd 2024-11-19T12:49:42,374 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 250ece2a8d1f4803969a50a95f4091fd(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:49:42,374 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:42,374 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020582290; duration=0sec 2024-11-19T12:49:42,374 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:42,374 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:49:42,377 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/table/0998358e0ec948aea4668e6d3eab39fe is 65, key is TestLogRolling-testLogRolling/table:state/1732020557070/Put/seqid=0 2024-11-19T12:49:42,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741857_1033 (size=5340) 2024-11-19T12:49:42,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741857_1033 (size=5340) 2024-11-19T12:49:42,382 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/table/0998358e0ec948aea4668e6d3eab39fe 2024-11-19T12:49:42,388 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/6181f4934b3846879c8f59f1a4b64b31 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/info/6181f4934b3846879c8f59f1a4b64b31 2024-11-19T12:49:42,394 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/info/6181f4934b3846879c8f59f1a4b64b31, entries=30, sequenceid=17, filesize=9.6 K 2024-11-19T12:49:42,395 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/ns/c1ce8225565c433da8a1c19aa4a97006 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/ns/c1ce8225565c433da8a1c19aa4a97006 2024-11-19T12:49:42,401 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/ns/c1ce8225565c433da8a1c19aa4a97006, entries=2, sequenceid=17, filesize=5.0 K 2024-11-19T12:49:42,402 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/table/0998358e0ec948aea4668e6d3eab39fe as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/table/0998358e0ec948aea4668e6d3eab39fe 2024-11-19T12:49:42,408 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/table/0998358e0ec948aea4668e6d3eab39fe, entries=2, sequenceid=17, filesize=5.2 K 2024-11-19T12:49:42,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.11 KB/5234, heapSize ~8.66 KB/8872, currentSize=705 B/705 for 1588230740 in 152ms, sequenceid=17, compaction requested=false 2024-11-19T12:49:42,409 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T12:49:43,118 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] ipc.CallRunner(138): callId: 103 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:38318 deadline: 1732020593118, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. is not online on aba5a916dfea,39863,1732020554954 2024-11-19T12:49:43,119 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. is not online on aba5a916dfea,39863,1732020554954 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T12:49:43,119 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364. 
is not online on aba5a916dfea,39863,1732020554954 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-19T12:49:43,119 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732020556691.13fd330ee27d6492fa0139cddb90d364., hostname=aba5a916dfea,39863,1732020554954, seqNum=2 from cache 2024-11-19T12:49:43,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:43,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:44,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:44,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:44,704 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-19T12:49:45,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:45,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:46,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:46,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:46,379 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,380 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,381 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,382 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,922 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-19T12:49:46,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,924 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,925 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,927 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,958 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,962 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:46,966 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-19T12:49:47,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:47,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:48,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:48,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:49,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:49,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:50,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:50,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:51,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:51,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:52,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:52,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:53,178 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., hostname=aba5a916dfea,39863,1732020554954, seqNum=131] 2024-11-19T12:49:53,191 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:53,191 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:53,195 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0bb533fb73ce4759a8476f84b8eacd0d is 1080, key is row0097/info:/1732020593179/Put/seqid=0 2024-11-19T12:49:53,200 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741858_1034 (size=12516) 2024-11-19T12:49:53,201 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0bb533fb73ce4759a8476f84b8eacd0d 2024-11-19T12:49:53,201 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741858_1034 (size=12516) 2024-11-19T12:49:53,208 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0bb533fb73ce4759a8476f84b8eacd0d as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d 2024-11-19T12:49:53,220 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d, entries=7, sequenceid=141, filesize=12.2 K 2024-11-19T12:49:53,221 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for c1f15182103f19dfacb6bc5f9facbedf in 30ms, sequenceid=141, compaction requested=false 2024-11-19T12:49:53,221 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 
2024-11-19T12:49:53,222 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:53,222 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T12:49:53,226 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/849aa0407a9f4180811f5b2a2bf8ea03 is 1080, key is row0104/info:/1732020593193/Put/seqid=0 2024-11-19T12:49:53,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741859_1035 (size=19000) 2024-11-19T12:49:53,230 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741859_1035 (size=19000) 2024-11-19T12:49:53,231 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=157 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/849aa0407a9f4180811f5b2a2bf8ea03 2024-11-19T12:49:53,238 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/849aa0407a9f4180811f5b2a2bf8ea03 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03 2024-11-19T12:49:53,243 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03, entries=13, sequenceid=157, filesize=18.6 K 2024-11-19T12:49:53,244 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for c1f15182103f19dfacb6bc5f9facbedf in 22ms, sequenceid=157, compaction requested=true 2024-11-19T12:49:53,244 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:53,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:53,244 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:53,244 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:53,244 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:53,245 INFO [MemStoreFlusher.0 {}] 
regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-19T12:49:53,246 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74500 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:53,246 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:53,246 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:49:53,246 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/250ece2a8d1f4803969a50a95f4091fd, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=72.8 K 2024-11-19T12:49:53,246 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 250ece2a8d1f4803969a50a95f4091fd, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732020579038 2024-11-19T12:49:53,247 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0bb533fb73ce4759a8476f84b8eacd0d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732020593179 2024-11-19T12:49:53,247 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 849aa0407a9f4180811f5b2a2bf8ea03, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732020593193 2024-11-19T12:49:53,248 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/da96cc9882f74cd08c84e8f820bf5da0 is 1080, key is row0117/info:/1732020593223/Put/seqid=0 2024-11-19T12:49:53,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741860_1036 (size=16828) 2024-11-19T12:49:53,252 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741860_1036 (size=16828) 2024-11-19T12:49:53,253 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=171 (bloomFilter=true), 
to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/da96cc9882f74cd08c84e8f820bf5da0 2024-11-19T12:49:53,258 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/da96cc9882f74cd08c84e8f820bf5da0 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0 2024-11-19T12:49:53,261 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#75 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:53,261 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/cd8186f95f914cdbb10f3a9a475b39ae is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:53,264 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0, entries=11, sequenceid=171, filesize=16.4 K 2024-11-19T12:49:53,265 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=1.05 KB/1076 for c1f15182103f19dfacb6bc5f9facbedf in 20ms, sequenceid=171, compaction requested=false 2024-11-19T12:49:53,265 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741861_1037 (size=64714) 2024-11-19T12:49:53,275 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741861_1037 (size=64714) 2024-11-19T12:49:53,280 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/cd8186f95f914cdbb10f3a9a475b39ae as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/cd8186f95f914cdbb10f3a9a475b39ae 2024-11-19T12:49:53,287 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into cd8186f95f914cdbb10f3a9a475b39ae(size=63.2 K), total size for store is 79.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-19T12:49:53,287 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:53,287 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020593244; duration=0sec 2024-11-19T12:49:53,287 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:53,287 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:49:53,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:53,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:54,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:54,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:55,262 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:55,262 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:55,267 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 is 1080, key is row0128/info:/1732020593246/Put/seqid=0 2024-11-19T12:49:55,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741862_1038 (size=12516) 2024-11-19T12:49:55,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741862_1038 (size=12516) 2024-11-19T12:49:55,285 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=182 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 2024-11-19T12:49:55,291 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 2024-11-19T12:49:55,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:55,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:55,296 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6, entries=7, sequenceid=182, filesize=12.2 K 2024-11-19T12:49:55,297 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=16.81 KB/17216 for c1f15182103f19dfacb6bc5f9facbedf in 35ms, sequenceid=182, compaction requested=true 2024-11-19T12:49:55,297 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:55,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:55,298 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:55,298 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:55,299 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 94058 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:55,299 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:55,299 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
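The repeated Close-WAL-Writer-0 warnings above all have the same shape: an InvocationTargetException raised inside RecoverLeaseFSUtils.isFileClosed whose cause is java.io.IOException: Filesystem closed, meaning the DFSClient behind the old WAL's filesystem had already been shut down when lease recovery probed the file. The Method.invoke frames in the trace show the probe is made reflectively, which is why the IOException surfaces wrapped. A small sketch of that calling pattern under those assumptions (not the HBase source):

import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class IsFileClosedProbeSketch {
  // Looks up DistributedFileSystem.isFileClosed(Path) by reflection and invokes it.
  // If the underlying DFS client is already closed, the invocation fails with an
  // InvocationTargetException wrapping "java.io.IOException: Filesystem closed".
  static boolean isFileClosed(FileSystem fs, Path wal) throws Exception {
    Method probe = fs.getClass().getMethod("isFileClosed", Path.class);
    try {
      return (Boolean) probe.invoke(fs, wal);
    } catch (InvocationTargetException e) {
      Throwable cause = e.getCause(); // here: java.io.IOException: Filesystem closed
      throw cause instanceof Exception ? (Exception) cause : e;
    }
  }
}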
2024-11-19T12:49:55,299 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/cd8186f95f914cdbb10f3a9a475b39ae, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=91.9 K 2024-11-19T12:49:55,299 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:55,300 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=18.91 KB heapSize=20.50 KB 2024-11-19T12:49:55,300 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting cd8186f95f914cdbb10f3a9a475b39ae, keycount=55, bloomtype=ROW, size=63.2 K, encoding=NONE, compression=NONE, seqNum=157, earliestPutTs=1732020579038 2024-11-19T12:49:55,300 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting da96cc9882f74cd08c84e8f820bf5da0, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=171, earliestPutTs=1732020593223 2024-11-19T12:49:55,300 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 12bf4d78ce0f4ed4a7d3ae0e4cf762c6, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732020593246 2024-11-19T12:49:55,303 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/73934042341949fc9cdcfa0227d55937 is 1080, key is row0135/info:/1732020595264/Put/seqid=0 2024-11-19T12:49:55,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741863_1039 (size=24394) 2024-11-19T12:49:55,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741863_1039 (size=24394) 2024-11-19T12:49:55,310 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=18.91 KB at sequenceid=203 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/73934042341949fc9cdcfa0227d55937 2024-11-19T12:49:55,313 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#78 average throughput is 37.45 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:55,314 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/9f61e77347a54dbab0fc0e5503ef99ac is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:55,316 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/73934042341949fc9cdcfa0227d55937 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937 2024-11-19T12:49:55,322 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937, entries=18, sequenceid=203, filesize=23.8 K 2024-11-19T12:49:55,323 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~18.91 KB/19368, heapSize ~20.48 KB/20976, currentSize=8.41 KB/8608 for c1f15182103f19dfacb6bc5f9facbedf in 24ms, sequenceid=203, compaction requested=false 2024-11-19T12:49:55,323 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:55,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741864_1040 (size=84293) 2024-11-19T12:49:55,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741864_1040 (size=84293) 2024-11-19T12:49:55,331 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/9f61e77347a54dbab0fc0e5503ef99ac as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/9f61e77347a54dbab0fc0e5503ef99ac 2024-11-19T12:49:55,337 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 9f61e77347a54dbab0fc0e5503ef99ac(size=82.3 K), total size for store is 106.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
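The PressureAwareThroughputController lines bracketing these compactions report a running average throughput against the 50.00 MB/second limit and how many times the writer slept to stay under it. The following is a back-of-the-envelope sketch of that kind of accounting; the class name and the sleep policy are assumptions for illustration, not the HBase implementation.

public class ThroughputAccountingSketch {
  private final double limitBytesPerSec;
  private final long startNanos = System.nanoTime();
  private long bytesWritten;
  private int sleeps;

  ThroughputAccountingSketch(double limitBytesPerSec) {
    this.limitBytesPerSec = limitBytesPerSec;
  }

  // Record newly written bytes; sleep only when the running average exceeds the limit.
  void control(long newBytes) throws InterruptedException {
    bytesWritten += newBytes;
    double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
    if (bytesWritten / elapsedSec > limitBytesPerSec) {
      sleeps++;
      // Sleep just long enough for the average to fall back to the limit.
      long sleepMs = (long) (1000 * (bytesWritten / limitBytesPerSec - elapsedSec));
      Thread.sleep(Math.max(sleepMs, 1));
    }
  }

  String summary() {
    double elapsedSec = Math.max((System.nanoTime() - startNanos) / 1e9, 1e-9);
    double mbPerSec = bytesWritten / elapsedSec / (1024 * 1024);
    return String.format("average throughput is %.2f MB/second, slept %d time(s), limit is %.2f MB/second",
        mbPerSec, sleeps, limitBytesPerSec / (1024 * 1024));
  }
}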
2024-11-19T12:49:55,337 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:55,337 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020595298; duration=0sec 2024-11-19T12:49:55,338 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:55,338 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:49:56,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:49:56,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:57,093 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-19T12:49:57,093 INFO [master/aba5a916dfea:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-19T12:49:57,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:57,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:57,324 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:57,325 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-19T12:49:57,329 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/fbe73fcd5c8945c7a2b30919a31edace is 1080, key is row0153/info:/1732020595301/Put/seqid=0 2024-11-19T12:49:57,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741865_1041 (size=14672) 2024-11-19T12:49:57,340 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741865_1041 (size=14672) 2024-11-19T12:49:57,341 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=216 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/fbe73fcd5c8945c7a2b30919a31edace 2024-11-19T12:49:57,347 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/fbe73fcd5c8945c7a2b30919a31edace as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace 2024-11-19T12:49:57,353 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace, entries=9, sequenceid=216, filesize=14.3 K 2024-11-19T12:49:57,354 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=12.61 KB/12912 for c1f15182103f19dfacb6bc5f9facbedf in 30ms, sequenceid=216, compaction requested=true 2024-11-19T12:49:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:57,354 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: 
MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:57,354 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:57,355 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 123359 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:57,356 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:57,356 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:49:57,356 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/9f61e77347a54dbab0fc0e5503ef99ac, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=120.5 K 2024-11-19T12:49:57,356 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:57,356 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T12:49:57,356 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9f61e77347a54dbab0fc0e5503ef99ac, keycount=73, bloomtype=ROW, size=82.3 K, encoding=NONE, compression=NONE, seqNum=182, earliestPutTs=1732020579038 2024-11-19T12:49:57,356 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 73934042341949fc9cdcfa0227d55937, keycount=18, bloomtype=ROW, size=23.8 K, encoding=NONE, compression=NONE, seqNum=203, earliestPutTs=1732020595264 2024-11-19T12:49:57,357 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting fbe73fcd5c8945c7a2b30919a31edace, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732020595301 2024-11-19T12:49:57,360 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8cfdb214fe88401cbbb69da3c773fd99 is 1080, key is row0162/info:/1732020597326/Put/seqid=0 2024-11-19T12:49:57,373 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 
c1f15182103f19dfacb6bc5f9facbedf#info#compaction#81 average throughput is 51.31 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:57,373 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/398e305b6cf2431f9660bbd4e51a4312 is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:57,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741866_1042 (size=19000) 2024-11-19T12:49:57,376 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741866_1042 (size=19000) 2024-11-19T12:49:57,377 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=232 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8cfdb214fe88401cbbb69da3c773fd99 2024-11-19T12:49:57,385 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8cfdb214fe88401cbbb69da3c773fd99 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99 2024-11-19T12:49:57,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99, entries=13, sequenceid=232, filesize=18.6 K 2024-11-19T12:49:57,393 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=14.71 KB/15064 for c1f15182103f19dfacb6bc5f9facbedf in 36ms, sequenceid=232, compaction requested=false 2024-11-19T12:49:57,393 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741867_1043 (size=113509) 2024-11-19T12:49:57,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741867_1043 (size=113509) 2024-11-19T12:49:57,395 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:57,395 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=16.81 KB heapSize=18.25 KB 2024-11-19T12:49:57,400 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/93c2f9242fd441f8b0949abae400ac37 is 1080, key is row0175/info:/1732020597357/Put/seqid=0 2024-11-19T12:49:57,402 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/398e305b6cf2431f9660bbd4e51a4312 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/398e305b6cf2431f9660bbd4e51a4312 2024-11-19T12:49:57,409 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 398e305b6cf2431f9660bbd4e51a4312(size=110.8 K), total size for store is 129.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:57,409 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741868_1044 (size=22238) 2024-11-19T12:49:57,409 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:57,409 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020597354; duration=0sec 2024-11-19T12:49:57,409 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:57,409 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:49:57,410 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741868_1044 (size=22238) 2024-11-19T12:49:57,410 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=16.81 KB at sequenceid=251 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/93c2f9242fd441f8b0949abae400ac37 2024-11-19T12:49:57,416 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/93c2f9242fd441f8b0949abae400ac37 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37 2024-11-19T12:49:57,422 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37, entries=16, sequenceid=251, filesize=21.7 K 2024-11-19T12:49:57,423 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~16.81 KB/17216, heapSize ~18.23 KB/18672, currentSize=2.10 KB/2152 for c1f15182103f19dfacb6bc5f9facbedf in 28ms, sequenceid=251, compaction requested=true 2024-11-19T12:49:57,423 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:57,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:57,424 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:57,424 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:57,425 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 154747 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:57,425 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:57,425 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
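The "Selecting compaction from 3 store files" and "Exploring compaction algorithm has selected 3 files ... with 1 in ratio" lines above come from the exploring compaction policy, which scans candidate windows of store files and counts how many satisfy a size-ratio constraint before picking one (here it ends up taking all three eligible files). Below is a reduced sketch of the ratio test such a policy applies to one candidate window; the method names and single ratio parameter are simplifications, and the real ExploringCompactionPolicy also honours min/max file counts and whole-store selections like the "(all files)" case in this log.

import java.util.List;

public class RatioWindowSketch {
  // A window of store-file sizes passes when no single file is larger than
  // ratio * (combined size of the other files in the window).
  static boolean withinRatio(List<Long> sizes, double ratio) {
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // Hypothetical sizes: three similar files pass, a window dominated by one big file does not.
    System.out.println(withinRatio(List.of(20_000L, 18_000L, 22_000L), 1.2)); // true
    System.out.println(withinRatio(List.of(110_000L, 20_000L, 22_000L), 1.2)); // false
  }
}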
2024-11-19T12:49:57,425 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/398e305b6cf2431f9660bbd4e51a4312, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=151.1 K 2024-11-19T12:49:57,426 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 398e305b6cf2431f9660bbd4e51a4312, keycount=100, bloomtype=ROW, size=110.8 K, encoding=NONE, compression=NONE, seqNum=216, earliestPutTs=1732020579038 2024-11-19T12:49:57,426 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8cfdb214fe88401cbbb69da3c773fd99, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=232, earliestPutTs=1732020597326 2024-11-19T12:49:57,427 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 93c2f9242fd441f8b0949abae400ac37, keycount=16, bloomtype=ROW, size=21.7 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732020597357 2024-11-19T12:49:57,439 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#83 average throughput is 66.19 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:57,440 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/74bb121e2f154730b900706b2bbe71d6 is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:57,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741869_1045 (size=145078) 2024-11-19T12:49:57,456 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741869_1045 (size=145078) 2024-11-19T12:49:57,463 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/74bb121e2f154730b900706b2bbe71d6 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/74bb121e2f154730b900706b2bbe71d6 2024-11-19T12:49:57,470 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 74bb121e2f154730b900706b2bbe71d6(size=141.7 K), total size for store is 141.7 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:57,470 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:57,471 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020597423; duration=0sec 2024-11-19T12:49:57,471 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:57,471 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:49:58,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:58,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:59,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:59,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:49:59,415 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:59,415 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:49:59,420 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0ed01feed4404dd2b03ce1e87bf4689c is 1080, key is row0191/info:/1732020597397/Put/seqid=0 2024-11-19T12:49:59,428 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741870_1046 (size=12521) 2024-11-19T12:49:59,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741870_1046 (size=12521) 2024-11-19T12:49:59,429 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=263 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0ed01feed4404dd2b03ce1e87bf4689c 2024-11-19T12:49:59,436 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/0ed01feed4404dd2b03ce1e87bf4689c as 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c 2024-11-19T12:49:59,442 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c, entries=7, sequenceid=263, filesize=12.2 K 2024-11-19T12:49:59,443 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for c1f15182103f19dfacb6bc5f9facbedf in 28ms, sequenceid=263, compaction requested=false 2024-11-19T12:49:59,443 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:59,444 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:59,444 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-19T12:49:59,448 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/5de10787b889405c9b84819b5fa276c8 is 1080, key is row0198/info:/1732020599417/Put/seqid=0 2024-11-19T12:49:59,453 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741871_1047 (size=19013) 2024-11-19T12:49:59,454 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741871_1047 (size=19013) 2024-11-19T12:49:59,454 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=279 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/5de10787b889405c9b84819b5fa276c8 2024-11-19T12:49:59,460 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/5de10787b889405c9b84819b5fa276c8 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8 2024-11-19T12:49:59,466 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8, entries=13, sequenceid=279, filesize=18.6 K 2024-11-19T12:49:59,467 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=11.56 KB/11836 for c1f15182103f19dfacb6bc5f9facbedf in 23ms, sequenceid=279, compaction requested=true 2024-11-19T12:49:59,467 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:59,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:49:59,468 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:59,468 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:49:59,469 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 176612 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:49:59,469 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:49:59,469 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:49:59,469 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/74bb121e2f154730b900706b2bbe71d6, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=172.5 K 2024-11-19T12:49:59,470 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:49:59,470 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T12:49:59,470 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting 74bb121e2f154730b900706b2bbe71d6, keycount=129, bloomtype=ROW, size=141.7 K, encoding=NONE, compression=NONE, seqNum=251, earliestPutTs=1732020579038 2024-11-19T12:49:59,470 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting 0ed01feed4404dd2b03ce1e87bf4689c, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=263, earliestPutTs=1732020597397 2024-11-19T12:49:59,470 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] compactions.Compactor(225): Compacting 5de10787b889405c9b84819b5fa276c8, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732020599417 2024-11-19T12:49:59,475 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8e985d4847b24e4db625a55952aafc11 is 1080, key is row0211/info:/1732020599445/Put/seqid=0 2024-11-19T12:49:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741872_1048 (size=17918) 2024-11-19T12:49:59,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741872_1048 (size=17918) 2024-11-19T12:49:59,481 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=294 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8e985d4847b24e4db625a55952aafc11 2024-11-19T12:49:59,483 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#87 average throughput is 50.97 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:49:59,484 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/3cdea3d2bce24f1283becc61cfd94bcb is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:49:59,488 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/8e985d4847b24e4db625a55952aafc11 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11 2024-11-19T12:49:59,492 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11, entries=12, sequenceid=294, filesize=17.5 K 2024-11-19T12:49:59,493 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=2.10 KB/2152 for c1f15182103f19dfacb6bc5f9facbedf in 23ms, sequenceid=294, compaction requested=false 2024-11-19T12:49:59,493 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741873_1049 (size=166762) 2024-11-19T12:49:59,496 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741873_1049 (size=166762) 2024-11-19T12:49:59,502 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/3cdea3d2bce24f1283becc61cfd94bcb as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/3cdea3d2bce24f1283becc61cfd94bcb 2024-11-19T12:49:59,508 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 3cdea3d2bce24f1283becc61cfd94bcb(size=162.9 K), total size for store is 180.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:49:59,508 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:49:59,508 INFO [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020599467; duration=0sec 2024-11-19T12:49:59,508 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:49:59,508 DEBUG [RS:0;aba5a916dfea:39863-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:50:00,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:00,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:01,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:01,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:01,485 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:50:01,485 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-19T12:50:01,489 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/4839ca6e894144f79712957f6480c02e is 1080, key is row0223/info:/1732020599471/Put/seqid=0 2024-11-19T12:50:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741874_1050 (size=12523) 2024-11-19T12:50:01,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741874_1050 (size=12523) 2024-11-19T12:50:01,495 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=305 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/4839ca6e894144f79712957f6480c02e 2024-11-19T12:50:01,501 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/4839ca6e894144f79712957f6480c02e as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e 2024-11-19T12:50:01,506 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e, entries=7, sequenceid=305, filesize=12.2 K 2024-11-19T12:50:01,507 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.46 KB/9684 for c1f15182103f19dfacb6bc5f9facbedf in 22ms, sequenceid=305, compaction requested=true 2024-11-19T12:50:01,508 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:01,508 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:50:01,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under 
compaction store size is 1 2024-11-19T12:50:01,508 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:50:01,508 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:50:01,508 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-19T12:50:01,509 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 197203 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:50:01,509 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:50:01,509 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:50:01,510 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/3cdea3d2bce24f1283becc61cfd94bcb, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=192.6 K 2024-11-19T12:50:01,510 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3cdea3d2bce24f1283becc61cfd94bcb, keycount=149, bloomtype=ROW, size=162.9 K, encoding=NONE, compression=NONE, seqNum=279, earliestPutTs=1732020579038 2024-11-19T12:50:01,510 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8e985d4847b24e4db625a55952aafc11, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=294, earliestPutTs=1732020599445 2024-11-19T12:50:01,511 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 4839ca6e894144f79712957f6480c02e, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732020599471 2024-11-19T12:50:01,512 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6a6a531bc30e4d45bf260b50af1d770b is 1080, key is row0230/info:/1732020601486/Put/seqid=0 2024-11-19T12:50:01,516 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to 
blk_1073741875_1051 (size=15760) 2024-11-19T12:50:01,517 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741875_1051 (size=15760) 2024-11-19T12:50:01,517 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=318 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6a6a531bc30e4d45bf260b50af1d770b 2024-11-19T12:50:01,525 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#90 average throughput is 57.46 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:50:01,526 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6858383ab10744779cc76b275706b959 is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:50:01,527 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6a6a531bc30e4d45bf260b50af1d770b as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b 2024-11-19T12:50:01,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741876_1052 (size=187373) 2024-11-19T12:50:01,532 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741876_1052 (size=187373) 2024-11-19T12:50:01,533 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b, entries=10, sequenceid=318, filesize=15.4 K 2024-11-19T12:50:01,535 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=10.51 KB/10760 for c1f15182103f19dfacb6bc5f9facbedf in 26ms, sequenceid=318, compaction requested=false 2024-11-19T12:50:01,535 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:01,536 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=39863 {}] regionserver.HRegion(8855): Flush requested on c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:50:01,537 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-19T12:50:01,539 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6858383ab10744779cc76b275706b959 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6858383ab10744779cc76b275706b959 2024-11-19T12:50:01,541 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6244806adff2433cb88540569151a1eb is 1080, key is row0240/info:/1732020601509/Put/seqid=0 2024-11-19T12:50:01,546 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 6858383ab10744779cc76b275706b959(size=183.0 K), total size for store is 198.4 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:50:01,546 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:01,546 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020601508; duration=0sec 2024-11-19T12:50:01,546 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:50:01,546 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:50:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741877_1053 (size=17918) 2024-11-19T12:50:01,548 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741877_1053 (size=17918) 2024-11-19T12:50:01,548 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=333 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6244806adff2433cb88540569151a1eb 2024-11-19T12:50:01,554 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/6244806adff2433cb88540569151a1eb as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb 2024-11-19T12:50:01,559 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb, entries=12, sequenceid=333, filesize=17.5 K 2024-11-19T12:50:01,560 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=5.25 KB/5380 for c1f15182103f19dfacb6bc5f9facbedf in 24ms, sequenceid=333, compaction requested=true 2024-11-19T12:50:01,560 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:01,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store c1f15182103f19dfacb6bc5f9facbedf:info, priority=-2147483648, current under compaction store size is 1 2024-11-19T12:50:01,561 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:50:01,561 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-19T12:50:01,561 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 221051 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-19T12:50:01,562 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1541): c1f15182103f19dfacb6bc5f9facbedf/info is initiating minor compaction (all files) 2024-11-19T12:50:01,562 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of c1f15182103f19dfacb6bc5f9facbedf/info in TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
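The flush and compaction entries around this point follow a simple cycle: each memstore flush writes one new store file under .tmp and commits it into the store, and once the store accumulates enough files a minor compaction is requested that rewrites them into a single larger file, which is why the log alternates between "compaction requested=false" and "Selecting compaction from 3 store files". The following is a minimal, illustrative Java sketch of that cycle only — it is not the actual HBase HRegion/CompactSplit code; the class, method, and field names are invented for the example, and the file-count threshold of 3 is an assumption loosely mirroring the hbase.hstore.compactionThreshold setting.

```java
import java.util.ArrayList;
import java.util.List;

/**
 * Illustrative sketch of the flush-then-compact cycle seen in the log above.
 * Not the actual HBase implementation; names are invented for this example.
 */
public class FlushCompactCycleSketch {

    // Assumed threshold (loosely mirroring hbase.hstore.compactionThreshold = 3):
    // once the store holds this many files, a minor compaction is requested.
    private static final int COMPACTION_THRESHOLD = 3;

    private final List<Long> storeFileSizes = new ArrayList<>();

    /** Simulates a flush: the memstore contents become one new store file. */
    void flush(long memstoreDataSize) {
        storeFileSizes.add(memstoreDataSize);
        boolean compactionRequested = storeFileSizes.size() >= COMPACTION_THRESHOLD;
        System.out.printf("Finished flush of %d bytes, %d store files, compaction requested=%s%n",
            memstoreDataSize, storeFileSizes.size(), compactionRequested);
        if (compactionRequested) {
            compact();
        }
    }

    /** Simulates a minor compaction: all current store files are merged into one. */
    private void compact() {
        long total = storeFileSizes.stream().mapToLong(Long::longValue).sum();
        System.out.printf("Compacting %d files (%d bytes total) into one%n",
            storeFileSizes.size(), total);
        storeFileSizes.clear();
        storeFileSizes.add(total);
    }

    public static void main(String[] args) {
        FlushCompactCycleSketch store = new FlushCompactCycleSketch();
        // Flush sizes echo the 12:49:59 entries above (~7.36 KB, ~13.66 KB, ~12.61 KB).
        store.flush(7_532);
        store.flush(13_988);
        store.flush(12_912);
    }
}
```

Running the sketch prints one "Finished flush" line per flush and a single "Compacting" line once the third file appears, matching the shape of the HRegion(3140) and HStore(1337) entries in this section; the real policy additionally weighs file sizes and ratios (the ExploringCompactionPolicy lines above) before committing to a selection.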
2024-11-19T12:50:01,562 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6858383ab10744779cc76b275706b959, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb] into tmpdir=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp, totalSize=215.9 K 2024-11-19T12:50:01,562 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6858383ab10744779cc76b275706b959, keycount=168, bloomtype=ROW, size=183.0 K, encoding=NONE, compression=NONE, seqNum=305, earliestPutTs=1732020579038 2024-11-19T12:50:01,562 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6a6a531bc30e4d45bf260b50af1d770b, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=318, earliestPutTs=1732020601486 2024-11-19T12:50:01,563 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] compactions.Compactor(225): Compacting 6244806adff2433cb88540569151a1eb, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=333, earliestPutTs=1732020601509 2024-11-19T12:50:01,576 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): c1f15182103f19dfacb6bc5f9facbedf#info#compaction#92 average throughput is 48.74 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-19T12:50:01,576 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/81bb81fbeee94843bf49ea389e5c3da7 is 1080, key is row0062/info:/1732020579038/Put/seqid=0 2024-11-19T12:50:01,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741878_1054 (size=211286) 2024-11-19T12:50:01,579 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741878_1054 (size=211286) 2024-11-19T12:50:01,584 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/81bb81fbeee94843bf49ea389e5c3da7 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/81bb81fbeee94843bf49ea389e5c3da7 2024-11-19T12:50:01,587 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 1588230740, had cached 0 bytes from a total of 20340 2024-11-19T12:50:01,591 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in c1f15182103f19dfacb6bc5f9facbedf/info of c1f15182103f19dfacb6bc5f9facbedf into 81bb81fbeee94843bf49ea389e5c3da7(size=206.3 K), total size for store is 206.3 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-19T12:50:01,591 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:01,591 INFO [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., storeName=c1f15182103f19dfacb6bc5f9facbedf/info, priority=13, startTime=1732020601560; duration=0sec 2024-11-19T12:50:01,592 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-19T12:50:01,592 DEBUG [RS:0;aba5a916dfea:39863-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: c1f15182103f19dfacb6bc5f9facbedf:info 2024-11-19T12:50:02,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:02,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:03,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:03,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:03,550 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-19T12:50:03,551 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39863%2C1732020554954.1732020603551 2024-11-19T12:50:03,559 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,559 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,559 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,560 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,560 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,560 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020556001 with entries=318, filesize=310.38 KB; new WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020603551 2024-11-19T12:50:03,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741833_1009 (size=317837) 2024-11-19T12:50:03,562 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741833_1009 (size=317837) 2024-11-19T12:50:03,569 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35553:35553),(127.0.0.1/127.0.0.1:46349:46349)] 2024-11-19T12:50:03,573 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 9097f41c01b839a6e252df28290af8d9: 2024-11-19T12:50:03,573 INFO 
[Time-limited test {}] regionserver.HRegion(2902): Flushing c1f15182103f19dfacb6bc5f9facbedf 1/1 column families, dataSize=5.25 KB heapSize=5.88 KB 2024-11-19T12:50:03,577 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/2ae9a966d77f4a7fb58eb63d7999420f is 1080, key is row0252/info:/1732020601538/Put/seqid=0 2024-11-19T12:50:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741880_1056 (size=10357) 2024-11-19T12:50:03,581 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741880_1056 (size=10357) 2024-11-19T12:50:03,582 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=5.25 KB at sequenceid=343 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/2ae9a966d77f4a7fb58eb63d7999420f 2024-11-19T12:50:03,588 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/.tmp/info/2ae9a966d77f4a7fb58eb63d7999420f as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/2ae9a966d77f4a7fb58eb63d7999420f 2024-11-19T12:50:03,593 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/2ae9a966d77f4a7fb58eb63d7999420f, entries=5, sequenceid=343, filesize=10.1 K 2024-11-19T12:50:03,595 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.25 KB/5380, heapSize ~5.86 KB/6000, currentSize=0 B/0 for c1f15182103f19dfacb6bc5f9facbedf in 21ms, sequenceid=343, compaction requested=false 2024-11-19T12:50:03,595 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for c1f15182103f19dfacb6bc5f9facbedf: 2024-11-19T12:50:03,595 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=705 B heapSize=2.05 KB 2024-11-19T12:50:03,599 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/0c30ad645f494902a3bce41c5bb99dec is 193, key is TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf./info:regioninfo/1732020582294/Put/seqid=0 2024-11-19T12:50:03,603 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741881_1057 (size=6223) 2024-11-19T12:50:03,604 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741881_1057 (size=6223) 2024-11-19T12:50:03,604 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=705 B at sequenceid=21 (bloomFilter=true), 
to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/0c30ad645f494902a3bce41c5bb99dec 2024-11-19T12:50:03,610 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/.tmp/info/0c30ad645f494902a3bce41c5bb99dec as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/info/0c30ad645f494902a3bce41c5bb99dec 2024-11-19T12:50:03,616 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/info/0c30ad645f494902a3bce41c5bb99dec, entries=5, sequenceid=21, filesize=6.1 K 2024-11-19T12:50:03,617 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~705 B/705, heapSize ~1.29 KB/1320, currentSize=0 B/0 for 1588230740 in 22ms, sequenceid=21, compaction requested=false 2024-11-19T12:50:03,617 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-19T12:50:03,617 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C39863%2C1732020554954.1732020603617 2024-11-19T12:50:03,628 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,628 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,628 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,628 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,628 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020603551 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020603617 2024-11-19T12:50:03,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741879_1055 (size=731) 2024-11-19T12:50:03,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741879_1055 (size=731) 2024-11-19T12:50:03,631 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020556001 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs/aba5a916dfea%2C39863%2C1732020554954.1732020556001 2024-11-19T12:50:03,632 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/WALs/aba5a916dfea,39863,1732020554954/aba5a916dfea%2C39863%2C1732020554954.1732020603551 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs/aba5a916dfea%2C39863%2C1732020554954.1732020603551 2024-11-19T12:50:03,635 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35553:35553),(127.0.0.1/127.0.0.1:46349:46349)] 2024-11-19T12:50:03,636 INFO 
[Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1])
2024-11-19T12:50:03,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster
2024-11-19T12:50:03,636 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test.
2024-11-19T12:50:03,636 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79)
    at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611)
    at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-19T12:50:03,636 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T12:50:03,636
DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:03,636 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:50:03,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:50:03,636 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=2009537117, stopped=false 2024-11-19T12:50:03,637 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,42979,1732020554722 2024-11-19T12:50:03,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:03,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:03,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:03,689 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:50:03,689 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:03,689 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:03,690 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:03,690 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:50:03,690 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277)
    at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265)
    at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036)
    at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020)
    at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
    at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77)
    at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
    at java.base/java.lang.reflect.Method.invoke(Method.java:568)
    at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59)
    at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12)
    at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56)
    at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33)
    at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61)
    at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306)
    at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100)
    at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103)
    at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63)
    at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331)
    at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79)
    at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329)
    at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66)
    at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293)
    at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26)
    at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299)
    at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293)
    at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-19T12:50:03,690 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T12:50:03,690 INFO [Time-limited test {}]
regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,39863,1732020554954' ***** 2024-11-19T12:50:03,690 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:50:03,691 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(3091): Received CLOSE for 9097f41c01b839a6e252df28290af8d9 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(3091): Received CLOSE for c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,39863,1732020554954 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:50:03,691 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 9097f41c01b839a6e252df28290af8d9, disabling compactions & flushes 2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:39863. 2024-11-19T12:50:03,691 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 2024-11-19T12:50:03,691 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. 
2024-11-19T12:50:03,691 DEBUG [RS:0;aba5a916dfea:39863 {}] client.AsyncConnectionImpl(264): Call stack:
    at java.base/java.lang.Thread.getStackTrace(Thread.java:1619)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187)
    at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177)
    at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229)
    at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457)
    at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155)
    at java.base/java.security.AccessController.doPrivileged(AccessController.java:399)
    at java.base/javax.security.auth.Subject.doAs(Subject.java:376)
    at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930)
    at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322)
    at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152)
    at java.base/java.lang.Thread.run(Thread.java:840)
2024-11-19T12:50:03,691 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9. after waiting 0 ms
2024-11-19T12:50:03,691 DEBUG [RS:0;aba5a916dfea:39863 {}] ipc.AbstractRpcClient(514): Stopping rpc client
2024-11-19T12:50:03,691 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.
2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish...
2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish...
2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish...
2024-11-19T12:50:03,691 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:50:03,692 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-19T12:50:03,692 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1325): Online Regions={9097f41c01b839a6e252df28290af8d9=TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9., c1f15182103f19dfacb6bc5f9facbedf=TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf., 1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:50:03,692 DEBUG [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 9097f41c01b839a6e252df28290af8d9, c1f15182103f19dfacb6bc5f9facbedf 2024-11-19T12:50:03,692 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:50:03,692 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:50:03,692 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:50:03,692 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:50:03,692 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:50:03,692 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-bottom] to archive 2024-11-19T12:50:03,693 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-19T12:50:03,695 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:50:03,695 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. 
org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=aba5a916dfea:42979 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?]
    at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?]
    at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?]
    at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?]
    at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?]
    at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?]
    at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    ... 16 more
2024-11-19T12:50:03,696 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.-1 {}] regionserver.HStore(2414): Failed to report archival of files: []
2024-11-19T12:50:03,696 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1
2024-11-19T12:50:03,697 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint
2024-11-19T12:50:03,697 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740
2024-11-19T12:50:03,697 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020603692Running coprocessor pre-close hooks at 1732020603692Disabling compacts and flushes for region at 1732020603692Disabling writes for close at 1732020603692Writing region close event to WAL at 1732020603693 (+1 ms)Running coprocessor post-close hooks at 1732020603697 (+4 ms)Closed at 1732020603697
2024-11-19T12:50:03,697 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740
2024-11-19T12:50:03,699 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/9097f41c01b839a6e252df28290af8d9/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130
2024-11-19T12:50:03,700 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.
2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 9097f41c01b839a6e252df28290af8d9: Waiting for close lock at 1732020603691Running coprocessor pre-close hooks at 1732020603691Disabling compacts and flushes for region at 1732020603691Disabling writes for close at 1732020603691Writing region close event to WAL at 1732020603696 (+5 ms)Running coprocessor post-close hooks at 1732020603700 (+4 ms)Closed at 1732020603700
2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732020581156.9097f41c01b839a6e252df28290af8d9.
2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing c1f15182103f19dfacb6bc5f9facbedf, disabling compactions & flushes 2024-11-19T12:50:03,700 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. after waiting 0 ms 2024-11-19T12:50:03,700 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:50:03,701 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364->hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/13fd330ee27d6492fa0139cddb90d364/info/dc270dd4ca24440c8b972df9f1983d65-top, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/250ece2a8d1f4803969a50a95f4091fd, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/cd8186f95f914cdbb10f3a9a475b39ae, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/9f61e77347a54dbab0fc0e5503ef99ac, 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/398e305b6cf2431f9660bbd4e51a4312, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/74bb121e2f154730b900706b2bbe71d6, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/3cdea3d2bce24f1283becc61cfd94bcb, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6858383ab10744779cc76b275706b959, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb] to archive 2024-11-19T12:50:03,702 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-19T12:50:03,704 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/dc270dd4ca24440c8b972df9f1983d65.13fd330ee27d6492fa0139cddb90d364 2024-11-19T12:50:03,705 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-477d36dd487046fcad09438d0573e451 2024-11-19T12:50:03,706 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/250ece2a8d1f4803969a50a95f4091fd to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/250ece2a8d1f4803969a50a95f4091fd 2024-11-19T12:50:03,708 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/TestLogRolling-testLogRolling=13fd330ee27d6492fa0139cddb90d364-e940277152d14f4a83d43e7ee3baac6d 2024-11-19T12:50:03,709 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0bb533fb73ce4759a8476f84b8eacd0d 2024-11-19T12:50:03,710 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/cd8186f95f914cdbb10f3a9a475b39ae to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/cd8186f95f914cdbb10f3a9a475b39ae 2024-11-19T12:50:03,711 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/849aa0407a9f4180811f5b2a2bf8ea03 2024-11-19T12:50:03,713 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/da96cc9882f74cd08c84e8f820bf5da0 2024-11-19T12:50:03,714 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/9f61e77347a54dbab0fc0e5503ef99ac to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/9f61e77347a54dbab0fc0e5503ef99ac 2024-11-19T12:50:03,715 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/12bf4d78ce0f4ed4a7d3ae0e4cf762c6 2024-11-19T12:50:03,716 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/73934042341949fc9cdcfa0227d55937 2024-11-19T12:50:03,718 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] 
backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/398e305b6cf2431f9660bbd4e51a4312 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/398e305b6cf2431f9660bbd4e51a4312 2024-11-19T12:50:03,719 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/fbe73fcd5c8945c7a2b30919a31edace 2024-11-19T12:50:03,720 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8cfdb214fe88401cbbb69da3c773fd99 2024-11-19T12:50:03,721 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/74bb121e2f154730b900706b2bbe71d6 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/74bb121e2f154730b900706b2bbe71d6 2024-11-19T12:50:03,722 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/93c2f9242fd441f8b0949abae400ac37 2024-11-19T12:50:03,723 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/0ed01feed4404dd2b03ce1e87bf4689c 2024-11-19T12:50:03,724 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/3cdea3d2bce24f1283becc61cfd94bcb to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/3cdea3d2bce24f1283becc61cfd94bcb 2024-11-19T12:50:03,726 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/5de10787b889405c9b84819b5fa276c8 2024-11-19T12:50:03,727 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/8e985d4847b24e4db625a55952aafc11 2024-11-19T12:50:03,728 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6858383ab10744779cc76b275706b959 to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6858383ab10744779cc76b275706b959 2024-11-19T12:50:03,729 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/4839ca6e894144f79712957f6480c02e 2024-11-19T12:50:03,730 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b to 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6a6a531bc30e4d45bf260b50af1d770b 2024-11-19T12:50:03,731 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb to hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/archive/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/info/6244806adff2433cb88540569151a1eb 2024-11-19T12:50:03,731 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [250ece2a8d1f4803969a50a95f4091fd=42984, 0bb533fb73ce4759a8476f84b8eacd0d=12516, cd8186f95f914cdbb10f3a9a475b39ae=64714, 849aa0407a9f4180811f5b2a2bf8ea03=19000, da96cc9882f74cd08c84e8f820bf5da0=16828, 9f61e77347a54dbab0fc0e5503ef99ac=84293, 12bf4d78ce0f4ed4a7d3ae0e4cf762c6=12516, 73934042341949fc9cdcfa0227d55937=24394, 398e305b6cf2431f9660bbd4e51a4312=113509, fbe73fcd5c8945c7a2b30919a31edace=14672, 8cfdb214fe88401cbbb69da3c773fd99=19000, 74bb121e2f154730b900706b2bbe71d6=145078, 93c2f9242fd441f8b0949abae400ac37=22238, 0ed01feed4404dd2b03ce1e87bf4689c=12521, 3cdea3d2bce24f1283becc61cfd94bcb=166762, 5de10787b889405c9b84819b5fa276c8=19013, 8e985d4847b24e4db625a55952aafc11=17918, 6858383ab10744779cc76b275706b959=187373, 4839ca6e894144f79712957f6480c02e=12523, 6a6a531bc30e4d45bf260b50af1d770b=15760, 6244806adff2433cb88540569151a1eb=17918] 2024-11-19T12:50:03,735 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/data/default/TestLogRolling-testLogRolling/c1f15182103f19dfacb6bc5f9facbedf/recovered.edits/346.seqid, newMaxSeqId=346, maxSeqId=130 2024-11-19T12:50:03,736 INFO [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 2024-11-19T12:50:03,736 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for c1f15182103f19dfacb6bc5f9facbedf: Waiting for close lock at 1732020603700Running coprocessor pre-close hooks at 1732020603700Disabling compacts and flushes for region at 1732020603700Disabling writes for close at 1732020603700Writing region close event to WAL at 1732020603732 (+32 ms)Running coprocessor post-close hooks at 1732020603736 (+4 ms)Closed at 1732020603736 2024-11-19T12:50:03,736 DEBUG [RS_CLOSE_REGION-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732020581156.c1f15182103f19dfacb6bc5f9facbedf. 
2024-11-19T12:50:03,861 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:50:03,861 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:50:03,863 INFO [regionserver/aba5a916dfea:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:50:03,892 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,39863,1732020554954; all regions closed. 2024-11-19T12:50:03,893 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,893 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,893 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,893 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,893 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741834_1010 (size=8107) 2024-11-19T12:50:03,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741834_1010 (size=8107) 2024-11-19T12:50:03,900 DEBUG [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs 2024-11-19T12:50:03,900 INFO [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C39863%2C1732020554954.meta:.meta(num 1732020556580) 2024-11-19T12:50:03,900 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,900 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,900 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,901 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,901 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:03,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741882_1058 (size=780) 2024-11-19T12:50:03,903 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741882_1058 (size=780) 2024-11-19T12:50:03,905 DEBUG [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/oldWALs 2024-11-19T12:50:03,905 INFO [RS:0;aba5a916dfea:39863 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C39863%2C1732020554954:(num 1732020603617) 2024-11-19T12:50:03,905 DEBUG [RS:0;aba5a916dfea:39863 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:03,905 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:50:03,905 INFO [RS:0;aba5a916dfea:39863 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:50:03,905 INFO [RS:0;aba5a916dfea:39863 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:50:03,906 INFO [RS:0;aba5a916dfea:39863 {}] hbase.HBaseServerBase(448): 
Shutdown executor service 2024-11-19T12:50:03,906 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:50:03,906 INFO [RS:0;aba5a916dfea:39863 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39863 2024-11-19T12:50:03,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,39863,1732020554954 2024-11-19T12:50:03,913 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:50:03,914 INFO [RS:0;aba5a916dfea:39863 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:50:03,914 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,39863,1732020554954] 2024-11-19T12:50:03,930 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,39863,1732020554954 already deleted, retry=false 2024-11-19T12:50:03,930 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,39863,1732020554954 expired; onlineServers=0 2024-11-19T12:50:03,930 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,42979,1732020554722' ***** 2024-11-19T12:50:03,930 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:50:03,930 INFO [M:0;aba5a916dfea:42979 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:50:03,930 INFO [M:0;aba5a916dfea:42979 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:50:03,931 DEBUG [M:0;aba5a916dfea:42979 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:50:03,931 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:50:03,931 DEBUG [M:0;aba5a916dfea:42979 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:50:03,931 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020555704 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020555704,5,FailOnTimeoutGroup] 2024-11-19T12:50:03,931 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020555704 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020555704,5,FailOnTimeoutGroup] 2024-11-19T12:50:03,931 INFO [M:0;aba5a916dfea:42979 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:50:03,931 INFO [M:0;aba5a916dfea:42979 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:50:03,931 DEBUG [M:0;aba5a916dfea:42979 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:50:03,931 INFO [M:0;aba5a916dfea:42979 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:50:03,931 INFO [M:0;aba5a916dfea:42979 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:50:03,932 ERROR [M:0;aba5a916dfea:42979 {}] procedure2.ProcedureExecutor(763): There are still active thread in group java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10], see STDOUT java.lang.ThreadGroup[name=PEWorkerGroup,maxpri=10] Thread[IPC Parameter Sending Thread for localhost/127.0.0.1:42859,5,PEWorkerGroup] 2024-11-19T12:50:03,932 INFO [M:0;aba5a916dfea:42979 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:50:03,932 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-19T12:50:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:50:03,939 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:03,939 DEBUG [M:0;aba5a916dfea:42979 {}] zookeeper.ZKUtil(347): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:50:03,939 WARN [M:0;aba5a916dfea:42979 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:50:03,939 INFO [M:0;aba5a916dfea:42979 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/.lastflushedseqids 2024-11-19T12:50:03,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741883_1059 (size=228) 2024-11-19T12:50:03,948 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741883_1059 (size=228) 2024-11-19T12:50:03,948 INFO [M:0;aba5a916dfea:42979 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:50:03,948 INFO [M:0;aba5a916dfea:42979 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:50:03,948 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:50:03,949 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:03,949 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:03,949 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:50:03,949 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-19T12:50:03,949 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-19T12:50:03,967 DEBUG [M:0;aba5a916dfea:42979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d105cc1107164b16a109c670503a3d9e is 82, key is hbase:meta,,1/info:regioninfo/1732020556604/Put/seqid=0 2024-11-19T12:50:03,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741884_1060 (size=5672) 2024-11-19T12:50:03,992 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741884_1060 (size=5672) 2024-11-19T12:50:03,992 INFO [M:0;aba5a916dfea:42979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d105cc1107164b16a109c670503a3d9e 2024-11-19T12:50:04,019 DEBUG [M:0;aba5a916dfea:42979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f83c58c3dfc44fe5ad0a98a9fce23d51 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732020557077/Put/seqid=0 2024-11-19T12:50:04,022 INFO [RS:0;aba5a916dfea:39863 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:50:04,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:04,022 INFO [RS:0;aba5a916dfea:39863 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,39863,1732020554954; zookeeper connection closed. 
2024-11-19T12:50:04,022 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:39863-0x101546dde980001, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:04,035 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@4e4ca0e6 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@4e4ca0e6 2024-11-19T12:50:04,035 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:50:04,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741885_1061 (size=7090) 2024-11-19T12:50:04,037 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741885_1061 (size=7090) 2024-11-19T12:50:04,038 INFO [M:0;aba5a916dfea:42979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f83c58c3dfc44fe5ad0a98a9fce23d51 2024-11-19T12:50:04,042 INFO [M:0;aba5a916dfea:42979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f83c58c3dfc44fe5ad0a98a9fce23d51 2024-11-19T12:50:04,056 DEBUG [M:0;aba5a916dfea:42979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff7b43b9252a4d1790126603adf8cbc1 is 69, key is aba5a916dfea,39863,1732020554954/rs:state/1732020555840/Put/seqid=0 2024-11-19T12:50:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741886_1062 (size=5156) 2024-11-19T12:50:04,061 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741886_1062 (size=5156) 2024-11-19T12:50:04,061 INFO [M:0;aba5a916dfea:42979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff7b43b9252a4d1790126603adf8cbc1 2024-11-19T12:50:04,082 DEBUG [M:0;aba5a916dfea:42979 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5770dea23fc34b29b9e7a5cb5c6fb719 is 52, key is load_balancer_on/state:d/1732020556687/Put/seqid=0 2024-11-19T12:50:04,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741887_1063 (size=5056) 2024-11-19T12:50:04,087 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741887_1063 (size=5056) 2024-11-19T12:50:04,087 INFO [M:0;aba5a916dfea:42979 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), 
to=hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5770dea23fc34b29b9e7a5cb5c6fb719 2024-11-19T12:50:04,092 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/d105cc1107164b16a109c670503a3d9e as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d105cc1107164b16a109c670503a3d9e 2024-11-19T12:50:04,097 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/d105cc1107164b16a109c670503a3d9e, entries=8, sequenceid=125, filesize=5.5 K 2024-11-19T12:50:04,098 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/f83c58c3dfc44fe5ad0a98a9fce23d51 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f83c58c3dfc44fe5ad0a98a9fce23d51 2024-11-19T12:50:04,103 INFO [M:0;aba5a916dfea:42979 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for f83c58c3dfc44fe5ad0a98a9fce23d51 2024-11-19T12:50:04,103 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/f83c58c3dfc44fe5ad0a98a9fce23d51, entries=13, sequenceid=125, filesize=6.9 K 2024-11-19T12:50:04,104 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/ff7b43b9252a4d1790126603adf8cbc1 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ff7b43b9252a4d1790126603adf8cbc1 2024-11-19T12:50:04,108 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/ff7b43b9252a4d1790126603adf8cbc1, entries=1, sequenceid=125, filesize=5.0 K 2024-11-19T12:50:04,109 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/5770dea23fc34b29b9e7a5cb5c6fb719 as hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5770dea23fc34b29b9e7a5cb5c6fb719 2024-11-19T12:50:04,114 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:42859/user/jenkins/test-data/12793513-d01e-de5e-efa0-e21bd76fcd0c/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/5770dea23fc34b29b9e7a5cb5c6fb719, entries=1, sequenceid=125, filesize=4.9 K 2024-11-19T12:50:04,115 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=125, compaction requested=false 2024-11-19T12:50:04,117 INFO [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:04,118 DEBUG [M:0;aba5a916dfea:42979 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020603948Disabling compacts and flushes for region at 1732020603948Disabling writes for close at 1732020603949 (+1 ms)Obtaining lock to block concurrent updates at 1732020603949Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020603949Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1732020603949Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732020603950 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020603950Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020603966 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020603966Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020603999 (+33 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020604018 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020604018Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020604042 (+24 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020604056 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020604056Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020604066 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020604082 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020604082Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@15f0ad8a: reopening flushed file at 1732020604092 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@d88f6be: reopening flushed file at 1732020604097 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7de649bf: reopening flushed file at 1732020604103 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@34ddd69b: reopening flushed file at 1732020604109 (+6 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 166ms, sequenceid=125, compaction requested=false at 1732020604115 (+6 ms)Writing region close event to WAL at 1732020604117 (+2 ms)Closed at 1732020604117 2024-11-19T12:50:04,118 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:04,118 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:04,118 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:04,118 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-19T12:50:04,118 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:32881 is added to blk_1073741830_1006 (size=61332) 2024-11-19T12:50:04,120 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36813 is added to blk_1073741830_1006 (size=61332) 2024-11-19T12:50:04,120 INFO [M:0;aba5a916dfea:42979 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:50:04,120 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-19T12:50:04,121 INFO [M:0;aba5a916dfea:42979 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42979 2024-11-19T12:50:04,121 INFO [M:0;aba5a916dfea:42979 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:50:04,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:04,253 INFO [M:0;aba5a916dfea:42979 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:50:04,253 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42979-0x101546dde980000, quorum=127.0.0.1:56416, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:04,256 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2a188763{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:04,256 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4f5b93c6{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:04,256 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:04,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d63e15b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:04,257 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54e8b7e4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:04,259 WARN [BP-1592302674-172.17.0.2-1732020553018 heartbeating to localhost/127.0.0.1:42859 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:50:04,259 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:50:04,259 WARN [BP-1592302674-172.17.0.2-1732020553018 heartbeating to localhost/127.0.0.1:42859 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1592302674-172.17.0.2-1732020553018 (Datanode Uuid 367e7123-590e-41c7-91a6-3064a29331a2) service to localhost/127.0.0.1:42859 2024-11-19T12:50:04,259 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:50:04,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data3/current/BP-1592302674-172.17.0.2-1732020553018 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:04,260 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data4/current/BP-1592302674-172.17.0.2-1732020553018 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:04,260 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:50:04,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@637baa5c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:04,263 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3c9ced57{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:04,263 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:04,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4a0bbcc4{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:04,263 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a86eb96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:04,265 WARN [BP-1592302674-172.17.0.2-1732020553018 heartbeating to localhost/127.0.0.1:42859 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:50:04,265 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:50:04,265 WARN [BP-1592302674-172.17.0.2-1732020553018 heartbeating to localhost/127.0.0.1:42859 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1592302674-172.17.0.2-1732020553018 (Datanode Uuid 74b7389d-2bdc-4c37-9033-4be9efb8b9c0) service to localhost/127.0.0.1:42859 2024-11-19T12:50:04,265 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:50:04,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data1/current/BP-1592302674-172.17.0.2-1732020553018 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:04,266 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/cluster_8b90f3dc-ec18-806d-4887-8a0b188a707f/data/data2/current/BP-1592302674-172.17.0.2-1732020553018 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:04,266 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:50:04,272 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7700407f{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:50:04,273 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e430f9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:04,273 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:04,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@621c58fb{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:04,273 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6aaaafb9{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:04,281 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:50:04,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:04,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:04,319 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:50:04,328 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: LeaseRenewer:jenkins@localhost:42859 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42859 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) 
app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42859 from jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:42859 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:42859 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:42859 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=515 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=254 (was 204) - SystemLoadAverage LEAK? -, ProcessCount=11 (was 11), AvailableMemoryMB=5380 (was 5615) 2024-11-19T12:50:04,335 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=515, MaxFileDescriptor=1048576, SystemLoadAverage=254, ProcessCount=11, AvailableMemoryMB=5380 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.log.dir so I do NOT create it in target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/4bcd8d8e-00c8-ceb2-c025-5fdf588de138/hadoop.tmp.dir so I do NOT create it in target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593, deleteOnExit=true 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/test.cache.data in system properties and HBase conf 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.tmp.dir in system properties and HBase conf 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir in system properties and HBase conf 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting 
mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-19T12:50:04,336 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-19T12:50:04,336 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/nfs.dump.dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/java.io.tmpdir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-19T12:50:04,337 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-19T12:50:04,350 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:50:04,667 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:50:04,672 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:50:04,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:50:04,675 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:50:04,675 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:50:04,676 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:50:04,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@224b825{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:50:04,676 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3d03122a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:50:04,786 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@66d38df4{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/java.io.tmpdir/jetty-localhost-44565-hadoop-hdfs-3_4_1-tests_jar-_-any-454115868242906758/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:50:04,786 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c6d62fb{HTTP/1.1, (http/1.1)}{localhost:44565} 2024-11-19T12:50:04,786 INFO [Time-limited test {}] server.Server(415): Started @298484ms 2024-11-19T12:50:04,799 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-19T12:50:05,000 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:50:05,003 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:50:05,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:50:05,003 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:50:05,003 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-19T12:50:05,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3f0aa271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:50:05,004 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3bcfd91f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:50:05,128 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d4cb733{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/java.io.tmpdir/jetty-localhost-39967-hadoop-hdfs-3_4_1-tests_jar-_-any-1793729559231085071/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:05,129 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@45f7f9ff{HTTP/1.1, (http/1.1)}{localhost:39967} 2024-11-19T12:50:05,129 INFO [Time-limited test {}] server.Server(415): Started @298827ms 2024-11-19T12:50:05,130 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:50:05,166 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-19T12:50:05,169 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-19T12:50:05,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-19T12:50:05,170 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-19T12:50:05,170 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-19T12:50:05,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5fb67b12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,AVAILABLE} 2024-11-19T12:50:05,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2c01fadd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-19T12:50:05,283 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2def2572{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/java.io.tmpdir/jetty-localhost-44551-hadoop-hdfs-3_4_1-tests_jar-_-any-11742591273210054772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:05,283 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@48189acf{HTTP/1.1, (http/1.1)}{localhost:44551} 2024-11-19T12:50:05,283 INFO [Time-limited test {}] server.Server(415): Started @298981ms 2024-11-19T12:50:05,284 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-19T12:50:05,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:05,297 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:50:05,727 WARN [Thread-2501 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data1/current/BP-477597641-172.17.0.2-1732020604354/current, will proceed with Du for space computation calculation, 2024-11-19T12:50:05,727 WARN [Thread-2502 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data2/current/BP-477597641-172.17.0.2-1732020604354/current, will proceed with Du for space computation calculation, 2024-11-19T12:50:05,743 WARN [Thread-2466 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:50:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60c1b599ce3ca8da with lease ID 0x2bd6dfeb6e7e44a2: Processing first storage report for DS-c0184897-b237-42b1-a1c5-4693eb145898 from datanode DatanodeRegistration(127.0.0.1:41851, datanodeUuid=eb0f41f7-c3e6-4345-b63a-3ba0a4c37eb7, infoPort=40965, infoSecurePort=0, ipcPort=35987, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354) 2024-11-19T12:50:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60c1b599ce3ca8da with lease ID 0x2bd6dfeb6e7e44a2: from storage DS-c0184897-b237-42b1-a1c5-4693eb145898 node DatanodeRegistration(127.0.0.1:41851, datanodeUuid=eb0f41f7-c3e6-4345-b63a-3ba0a4c37eb7, infoPort=40965, infoSecurePort=0, ipcPort=35987, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:50:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x60c1b599ce3ca8da with lease ID 0x2bd6dfeb6e7e44a2: Processing first storage report for DS-4da1f079-f0fe-4175-af92-6bd30d3a136c from datanode DatanodeRegistration(127.0.0.1:41851, datanodeUuid=eb0f41f7-c3e6-4345-b63a-3ba0a4c37eb7, infoPort=40965, infoSecurePort=0, ipcPort=35987, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354) 2024-11-19T12:50:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x60c1b599ce3ca8da with lease ID 0x2bd6dfeb6e7e44a2: from storage DS-4da1f079-f0fe-4175-af92-6bd30d3a136c node DatanodeRegistration(127.0.0.1:41851, datanodeUuid=eb0f41f7-c3e6-4345-b63a-3ba0a4c37eb7, infoPort=40965, infoSecurePort=0, ipcPort=35987, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:50:05,883 WARN [Thread-2513 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data3/current/BP-477597641-172.17.0.2-1732020604354/current, will proceed with Du for space computation calculation, 2024-11-19T12:50:05,883 WARN [Thread-2514 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data4/current/BP-477597641-172.17.0.2-1732020604354/current, will proceed with Du for space computation calculation, 2024-11-19T12:50:05,915 WARN [Thread-2489 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-19T12:50:05,917 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf38e3b47cc3ee7cf with lease ID 0x2bd6dfeb6e7e44a3: Processing first storage report for DS-887be72d-c25a-4356-b5ad-9ac9fd4c9ca0 from datanode DatanodeRegistration(127.0.0.1:45379, datanodeUuid=16ad879d-1c3e-4197-a156-cebd023a40d9, infoPort=40609, infoSecurePort=0, ipcPort=46089, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354) 2024-11-19T12:50:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf38e3b47cc3ee7cf with lease ID 0x2bd6dfeb6e7e44a3: from storage DS-887be72d-c25a-4356-b5ad-9ac9fd4c9ca0 node DatanodeRegistration(127.0.0.1:45379, datanodeUuid=16ad879d-1c3e-4197-a156-cebd023a40d9, infoPort=40609, infoSecurePort=0, ipcPort=46089, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:50:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf38e3b47cc3ee7cf with lease ID 0x2bd6dfeb6e7e44a3: Processing first storage report for DS-039415f6-abac-4dc5-8cb6-dc8f3edf4104 from datanode DatanodeRegistration(127.0.0.1:45379, datanodeUuid=16ad879d-1c3e-4197-a156-cebd023a40d9, infoPort=40609, infoSecurePort=0, ipcPort=46089, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354) 2024-11-19T12:50:05,918 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf38e3b47cc3ee7cf with lease ID 0x2bd6dfeb6e7e44a3: from storage DS-039415f6-abac-4dc5-8cb6-dc8f3edf4104 node DatanodeRegistration(127.0.0.1:45379, datanodeUuid=16ad879d-1c3e-4197-a156-cebd023a40d9, infoPort=40609, infoSecurePort=0, ipcPort=46089, storageInfo=lv=-57;cid=testClusterID;nsid=805583517;c=1732020604354), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-19T12:50:06,008 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435 2024-11-19T12:50:06,010 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/zookeeper_0, clientPort=54187, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-19T12:50:06,011 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54187 2024-11-19T12:50:06,012 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,013 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,022 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:50:06,023 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741825_1001 (size=7) 2024-11-19T12:50:06,024 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3 with version=8 2024-11-19T12:50:06,024 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44083/user/jenkins/test-data/1f0246bc-176b-87b5-b0fa-f42225afafbc/hbase-staging 2024-11-19T12:50:06,026 INFO [Time-limited test {}] client.ConnectionUtils(128): master/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:50:06,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,026 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:50:06,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,026 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:50:06,027 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-19T12:50:06,027 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:50:06,027 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34001 2024-11-19T12:50:06,028 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:34001 connecting to ZooKeeper ensemble=127.0.0.1:54187 2024-11-19T12:50:06,080 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:340010x0, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-19T12:50:06,080 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:34001-0x101546ea6ff0000 connected 2024-11-19T12:50:06,147 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,149 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,152 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:06,152 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3, hbase.cluster.distributed=false 2024-11-19T12:50:06,154 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:50:06,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-19T12:50:06,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34001 2024-11-19T12:50:06,155 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34001 2024-11-19T12:50:06,158 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-19T12:50:06,159 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34001 2024-11-19T12:50:06,175 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/aba5a916dfea:0 server-side Connection retries=45 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-19T12:50:06,175 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-19T12:50:06,175 INFO 
[Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-19T12:50:06,176 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:40223 2024-11-19T12:50:06,177 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:40223 connecting to ZooKeeper ensemble=127.0.0.1:54187 2024-11-19T12:50:06,178 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,179 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,188 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:402230x0, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-19T12:50:06,188 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:402230x0, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:06,188 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:40223-0x101546ea6ff0001 connected 2024-11-19T12:50:06,189 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-19T12:50:06,189 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-19T12:50:06,189 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-19T12:50:06,190 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-19T12:50:06,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=40223 2024-11-19T12:50:06,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=40223 2024-11-19T12:50:06,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=40223 2024-11-19T12:50:06,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=40223 2024-11-19T12:50:06,191 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=40223 2024-11-19T12:50:06,202 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;aba5a916dfea:34001 2024-11-19T12:50:06,202 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:50:06,213 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:50:06,213 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-19T12:50:06,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,222 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-19T12:50:06,222 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/aba5a916dfea,34001,1732020606026 from backup master directory 2024-11-19T12:50:06,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:50:06,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-19T12:50:06,230 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
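The entries above show the master adding a backup-master znode under /hbase/backup-masters, setting a watch on /hbase/master, and then deleting its backup entry as it takes over. A minimal sketch of the underlying ZooKeeper pattern (an ephemeral znode plus a watch), using the plain org.apache.zookeeper client; the quorum address is taken from the log, but the paths, the serverName placeholder and the retry handling are illustrative assumptions, not HBase's actual ActiveMasterManager/ZKUtil code.

import org.apache.zookeeper.*;
import org.apache.zookeeper.ZooDefs.Ids;

// Sketch of an ephemeral-znode leader election, loosely mirroring the
// backup-master / active-master handover visible in the surrounding log.
public class MasterElectionSketch {
  public static void main(String[] args) throws Exception {
    String quorum = "127.0.0.1:54187";          // ensemble from the log
    String masterZNode = "/hbase/master";
    String serverName = "host,port,startcode";  // placeholder identity (assumption)

    ZooKeeper zk = new ZooKeeper(quorum, 30000, event -> { /* connection state events */ });
    try {
      // Try to become active by creating an ephemeral znode; it vanishes if this JVM dies.
      zk.create(masterZNode, serverName.getBytes(),
          Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
      System.out.println("Registered as active master");
    } catch (KeeperException.NodeExistsException e) {
      // Another master is active: set a watch and re-run the election when it goes away.
      zk.exists(masterZNode, we -> {
        if (we.getType() == Watcher.Event.EventType.NodeDeleted) {
          System.out.println("Active master znode deleted, retry election");
        }
      });
    }
  }
}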
2024-11-19T12:50:06,230 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,234 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/hbase.id] with ID: def24025-56d8-4574-85af-4e1688fd1510 2024-11-19T12:50:06,234 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/.tmp/hbase.id 2024-11-19T12:50:06,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:50:06,240 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741826_1002 (size=42) 2024-11-19T12:50:06,240 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/.tmp/hbase.id]:[hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/hbase.id] 2024-11-19T12:50:06,253 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:06,253 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-19T12:50:06,254 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
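The FSUtils lines above write the cluster ID to a temporary path (.tmp/hbase.id) and then move it to its final location, so a reader never sees a half-written file. A minimal sketch of that write-then-rename pattern on the Hadoop FileSystem API, reusing the rootdir and ID string from the log; the real FSUtils code stores a serialized ClusterId rather than plain text, so this is illustrative only.

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch: publish a small file atomically by writing to a temp path and renaming it.
public class ClusterIdWriteSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path rootDir = new Path("/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3");
    Path tmpId   = new Path(rootDir, ".tmp/hbase.id");
    Path finalId = new Path(rootDir, "hbase.id");

    // Write the complete content to the temporary location first...
    try (FSDataOutputStream out = fs.create(tmpId, true)) {
      out.write("def24025-56d8-4574-85af-4e1688fd1510".getBytes(StandardCharsets.UTF_8));
    }
    // ...then make it visible with a single rename.
    if (!fs.rename(tmpId, finalId)) {
      throw new IOException("Failed to move " + tmpId + " to " + finalId);
    }
  }
}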
2024-11-19T12:50:06,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,263 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:50:06,269 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741827_1003 (size=196) 2024-11-19T12:50:06,269 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-19T12:50:06,270 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-19T12:50:06,271 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:50:06,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:50:06,277 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741828_1004 (size=1189) 2024-11-19T12:50:06,278 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store 2024-11-19T12:50:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:50:06,284 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741829_1005 (size=34) 2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:50:06,285 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:06,285 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
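The master:store descriptor logged above (families info, proc, rs and state, each with its own versions, block size, encoding and bloom filter) maps onto a schema that can be expressed with the public descriptor builders. A minimal sketch, assuming the standard HBase client builder API; it covers only the info and proc families with the values shown in the log and is not the code HBase itself uses to create this region.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch: the 'info' and 'proc' families of the logged master:store descriptor,
// rebuilt with the public builder API for illustration.
public class MasterStoreSchemaSketch {
  public static TableDescriptor build() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)                                   // VERSIONS => '3'
            .setInMemory(true)                                   // IN_MEMORY => 'true'
            .setBlocksize(8 * 1024)                              // BLOCKSIZE => 8 KB
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .build())
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)                                   // VERSIONS => '1'
            .setBloomFilterType(BloomType.ROW)                   // BLOOMFILTER => 'ROW'
            .build())
        .build();
  }
}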
2024-11-19T12:50:06,285 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020606285Disabling compacts and flushes for region at 1732020606285Disabling writes for close at 1732020606285Writing region close event to WAL at 1732020606285Closed at 1732020606285 2024-11-19T12:50:06,286 WARN [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/.initializing 2024-11-19T12:50:06,286 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/WALs/aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,288 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C34001%2C1732020606026, suffix=, logDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/WALs/aba5a916dfea,34001,1732020606026, archiveDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/oldWALs, maxLogs=10 2024-11-19T12:50:06,289 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C34001%2C1732020606026.1732020606289 2024-11-19T12:50:06,293 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/WALs/aba5a916dfea,34001,1732020606026/aba5a916dfea%2C34001%2C1732020606026.1732020606289 2024-11-19T12:50:06,294 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40609:40609),(127.0.0.1/127.0.0.1:40965:40965)] 2024-11-19T12:50:06,294 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:50:06,295 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:50:06,295 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,295 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,296 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-19T12:50:06,298 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:06,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-19T12:50:06,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:06,298 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,299 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-19T12:50:06,299 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:50:06,300 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,301 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-19T12:50:06,301 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): 
Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:50:06,302 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-19T12:50:06,303 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,303 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-19T12:50:06,303 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,304 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,305 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,306 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,306 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,307 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 
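The 32.0 M fallback mentioned above follows directly from numbers elsewhere in this log: the flush size injected for master:store is 134217728 bytes (128 MB) and the table has four column families (info, proc, rs, state), so 134217728 / 4 = 33554432 bytes = 32 MB, which matches the flushSizeLowerBound=33554432 reported for the region just below.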
2024-11-19T12:50:06,308 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-19T12:50:06,310 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:50:06,311 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770456, jitterRate=-0.020315319299697876}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-19T12:50:06,311 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732020606295Initializing all the Stores at 1732020606296 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020606296Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020606296Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020606296Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020606296Cleaning up temporary data from old regions at 1732020606306 (+10 ms)Region opened successfully at 1732020606311 (+5 ms) 2024-11-19T12:50:06,312 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-19T12:50:06,315 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4e87fb99, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:50:06,316 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 
2024-11-19T12:50:06,316 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-19T12:50:06,316 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-19T12:50:06,317 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-19T12:50:06,317 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-19T12:50:06,318 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-19T12:50:06,318 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-19T12:50:06,320 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-19T12:50:06,320 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-19T12:50:06,344 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-19T12:50:06,345 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-19T12:50:06,345 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-19T12:50:06,355 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-19T12:50:06,355 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-19T12:50:06,356 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-19T12:50:06,363 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-19T12:50:06,364 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-19T12:50:06,371 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-19T12:50:06,373 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, 
baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-19T12:50:06,380 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-19T12:50:06,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:06,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:06,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,388 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,389 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=aba5a916dfea,34001,1732020606026, sessionid=0x101546ea6ff0000, setting cluster-up flag (Was=false) 2024-11-19T12:50:06,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,405 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,430 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-19T12:50:06,431 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,471 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-19T12:50:06,473 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=aba5a916dfea,34001,1732020606026 2024-11-19T12:50:06,474 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at 
hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-19T12:50:06,476 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-19T12:50:06,476 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-19T12:50:06,476 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-19T12:50:06,476 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: aba5a916dfea,34001,1732020606026 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/aba5a916dfea:0, corePoolSize=5, maxPoolSize=5 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/aba5a916dfea:0, corePoolSize=10, maxPoolSize=10 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:50:06,478 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; 
timeout=30000, timestamp=1732020636479 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-19T12:50:06,479 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-19T12:50:06,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-19T12:50:06,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,480 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:50:06,480 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-19T12:50:06,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-19T12:50:06,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-19T12:50:06,480 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-19T12:50:06,481 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-19T12:50:06,481 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-19T12:50:06,481 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020606481,5,FailOnTimeoutGroup] 2024-11-19T12:50:06,481 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,481 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 
'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-19T12:50:06,482 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020606481,5,FailOnTimeoutGroup] 2024-11-19T12:50:06,482 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,482 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-19T12:50:06,482 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,482 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
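The FSTableDescriptors record above spells out the hbase:meta table descriptor attribute by attribute (ROWCOL bloom filters, ROW_INDEX_V1 data block encoding, in-memory families, an 8 KB block size for 'info'). The sketch below shows how a column family with those attributes is expressed through the public HBase client builder API; it uses a hypothetical table name and is ordinary client-side code shown only to decode the attribute dump, not what FSTableDescriptors itself executes when writing the meta descriptor.

```java
// Sketch: building a column family with the attributes printed above (3 versions,
// ROWCOL bloom filter, ROW_INDEX_V1 data block encoding, in-memory, 8 KB blocks)
// using the public HBase 2.x+ client builder API. Table name is hypothetical.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class DescriptorSketch {
    public static TableDescriptor build() {
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setBloomFilterType(BloomType.ROWCOL)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))   // hypothetical table name
            .setColumnFamily(info)
            .build();
    }
}
```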
2024-11-19T12:50:06,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:50:06,488 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741831_1007 (size=1321) 2024-11-19T12:50:06,488 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-19T12:50:06,489 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3 2024-11-19T12:50:06,493 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(746): ClusterId : def24025-56d8-4574-85af-4e1688fd1510 2024-11-19T12:50:06,493 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-19T12:50:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:50:06,494 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741832_1008 (size=32) 2024-11-19T12:50:06,495 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:50:06,496 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column 
family info of region 1588230740 2024-11-19T12:50:06,497 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:50:06,497 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:06,498 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:50:06,499 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:50:06,499 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:06,499 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:50:06,501 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:50:06,501 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:06,501 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:50:06,502 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:50:06,502 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:06,503 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:06,503 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:50:06,503 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-19T12:50:06,503 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-19T12:50:06,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740 2024-11-19T12:50:06,504 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740 2024-11-19T12:50:06,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:50:06,505 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:50:06,505 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No 
hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:50:06,506 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:50:06,508 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-19T12:50:06,508 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863657, jitterRate=0.09819720685482025}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732020606495Initializing all the Stores at 1732020606495Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020606495Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020606496 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020606496Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020606496Cleaning up temporary data from old regions at 1732020606505 (+9 ms)Region opened successfully at 1732020606508 (+3 ms) 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:50:06,509 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:50:06,509 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:50:06,509 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: 
Waiting for close lock at 1732020606509Disabling compacts and flushes for region at 1732020606509Disabling writes for close at 1732020606509Writing region close event to WAL at 1732020606509Closed at 1732020606509 2024-11-19T12:50:06,510 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:50:06,510 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-19T12:50:06,510 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-19T12:50:06,511 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:50:06,512 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-19T12:50:06,513 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-19T12:50:06,514 DEBUG [RS:0;aba5a916dfea:40223 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@86e870, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=aba5a916dfea/172.17.0.2:0 2024-11-19T12:50:06,525 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;aba5a916dfea:40223 2024-11-19T12:50:06,525 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-19T12:50:06,525 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-19T12:50:06,525 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(832): About to register with Master. 
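Both the master local store (earlier, "32.0 M") and hbase:meta (above, "16.0 M") log the FlushLargeStoresPolicy fallback: with hbase.hregion.percolumnfamilyflush.size.lower.bound unset, the per-family flush lower bound is the region's memstore flush size divided by the number of column families. The sketch below is just that arithmetic, with numbers taken from, or implied by, the records above.

```java
// The FlushLargeStoresPolicy fallback logged above: with no explicit per-family lower
// bound configured, lowerBound = memstoreFlushSize / numberOfColumnFamilies.
public class FlushLowerBoundSketch {
    static long lowerBound(long memstoreFlushSizeBytes, int numFamilies) {
        return memstoreFlushSizeBytes / numFamilies;
    }

    public static void main(String[] args) {
        // master:store has 4 families (info, proc, rs, state): 128 MB / 4 = 32 MB,
        // matching flushSizeLowerBound=33554432 in the earlier record.
        System.out.println(lowerBound(128L << 20, 4));
        // hbase:meta has 4 families (info, ns, rep_barrier, table): the logged 16 MB
        // lower bound (flushSizeLowerBound=16777216) implies a 64 MB flush size.
        System.out.println(lowerBound(64L << 20, 4));
    }
}
```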
2024-11-19T12:50:06,526 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(2659): reportForDuty to master=aba5a916dfea,34001,1732020606026 with port=40223, startcode=1732020606175 2024-11-19T12:50:06,526 DEBUG [RS:0;aba5a916dfea:40223 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-19T12:50:06,528 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:51161, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-19T12:50:06,528 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34001 {}] master.ServerManager(363): Checking decommissioned status of RegionServer aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,528 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34001 {}] master.ServerManager(517): Registering regionserver=aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,529 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3 2024-11-19T12:50:06,529 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:45633 2024-11-19T12:50:06,529 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-19T12:50:06,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:50:06,538 DEBUG [RS:0;aba5a916dfea:40223 {}] zookeeper.ZKUtil(111): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,538 WARN [RS:0;aba5a916dfea:40223 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-19T12:50:06,538 INFO [RS:0;aba5a916dfea:40223 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:50:06,538 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,539 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [aba5a916dfea,40223,1732020606175] 2024-11-19T12:50:06,541 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-19T12:50:06,543 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-19T12:50:06,544 INFO [RS:0;aba5a916dfea:40223 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-19T12:50:06,544 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
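The earlier "Unable to get data of znode ... because node does not exist (not necessarily an error)" probes and the "Set watcher on existing znode=/hbase/rs/..." record above are routine ZooKeeper existence checks and watch registrations. Below is a minimal, hypothetical sketch using the plain ZooKeeper client rather than HBase's ZKUtil, against the quorum and a path shown in the log; session handling is deliberately simplified.

```java
// Minimal sketch (hypothetical client code, not HBase's ZKUtil) of the "check whether an
// optional znode exists before reading it" pattern the log records describe. Quorum and
// path come from the log; error handling and session management are simplified.
import org.apache.zookeeper.ZooKeeper;
import org.apache.zookeeper.data.Stat;

public class ZnodeProbeSketch {
    public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54187", 30000, event -> { });
        try {
            Stat stat = zk.exists("/hbase/balancer", false);
            if (stat == null) {
                // Matches the DEBUG message: absence of the node is not an error.
                System.out.println("/hbase/balancer does not exist (not necessarily an error)");
            } else {
                byte[] data = zk.getData("/hbase/balancer", false, stat);
                System.out.println("balancer switch payload: " + data.length + " bytes");
            }
        } finally {
            zk.close();
        }
    }
}
```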
2024-11-19T12:50:06,544 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-19T12:50:06,545 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-19T12:50:06,545 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/aba5a916dfea:0, corePoolSize=2, maxPoolSize=2 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,545 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,546 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,546 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/aba5a916dfea:0, corePoolSize=1, maxPoolSize=1 2024-11-19T12:50:06,546 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:50:06,546 DEBUG [RS:0;aba5a916dfea:40223 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/aba5a916dfea:0, corePoolSize=3, maxPoolSize=3 2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
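The "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." records above describe small, dedicated per-event-type thread pools on the regionserver. The sketch below shows the same bounded-pool idea with the plain JDK ThreadPoolExecutor; it only illustrates the logged parameters and is not HBase's own ExecutorService wrapper.

```java
// Illustration of the bounded per-event-type pools logged above (e.g. RS_OPEN_REGION with
// corePoolSize=1, maxPoolSize=1) using the JDK ThreadPoolExecutor. HBase wraps its pools
// in its own ExecutorService class; this is only the underlying idea.
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class EventPoolSketch {
    public static void main(String[] args) {
        ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
            1, 1,                         // corePoolSize=1, maxPoolSize=1, as logged
            60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<>()); // queued work waits for the single worker
        openRegionPool.execute(() -> System.out.println("handle one open-region event"));
        openRegionPool.shutdown();
    }
}
```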
2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,546 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40223,1732020606175-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:50:06,565 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-19T12:50:06,565 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,40223,1732020606175-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,565 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,565 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.Replication(171): aba5a916dfea,40223,1732020606175 started 2024-11-19T12:50:06,580 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:06,581 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1482): Serving as aba5a916dfea,40223,1732020606175, RpcServer on aba5a916dfea/172.17.0.2:40223, sessionid=0x101546ea6ff0001 2024-11-19T12:50:06,581 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-19T12:50:06,581 DEBUG [RS:0;aba5a916dfea:40223 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,581 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,40223,1732020606175' 2024-11-19T12:50:06,581 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-19T12:50:06,581 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-19T12:50:06,582 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-19T12:50:06,582 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-19T12:50:06,582 DEBUG [RS:0;aba5a916dfea:40223 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,582 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member 'aba5a916dfea,40223,1732020606175' 2024-11-19T12:50:06,582 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-19T12:50:06,582 DEBUG 
[RS:0;aba5a916dfea:40223 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-19T12:50:06,583 DEBUG [RS:0;aba5a916dfea:40223 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-19T12:50:06,583 INFO [RS:0;aba5a916dfea:40223 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-19T12:50:06,583 INFO [RS:0;aba5a916dfea:40223 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-19T12:50:06,662 WARN [aba5a916dfea:34001 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-19T12:50:06,685 INFO [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40223%2C1732020606175, suffix=, logDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/aba5a916dfea,40223,1732020606175, archiveDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs, maxLogs=32 2024-11-19T12:50:06,685 INFO [RS:0;aba5a916dfea:40223 {}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40223%2C1732020606175.1732020606685 2024-11-19T12:50:06,692 INFO [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/aba5a916dfea,40223,1732020606175/aba5a916dfea%2C40223%2C1732020606175.1732020606685 2024-11-19T12:50:06,693 DEBUG [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40609:40609),(127.0.0.1/127.0.0.1:40965:40965)] 2024-11-19T12:50:06,913 DEBUG [aba5a916dfea:34001 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-19T12:50:06,913 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=aba5a916dfea,40223,1732020606175 2024-11-19T12:50:06,915 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,40223,1732020606175, state=OPENING 2024-11-19T12:50:06,938 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-19T12:50:06,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,996 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:06,997 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-19T12:50:06,997 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:50:06,997 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:50:06,997 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,40223,1732020606175}] 2024-11-19T12:50:07,151 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-19T12:50:07,153 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38451, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-19T12:50:07,156 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-19T12:50:07,157 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:50:07,159 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=aba5a916dfea%2C40223%2C1732020606175.meta, suffix=.meta, logDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/aba5a916dfea,40223,1732020606175, archiveDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs, maxLogs=32 2024-11-19T12:50:07,159 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor aba5a916dfea%2C40223%2C1732020606175.meta.1732020607159.meta 2024-11-19T12:50:07,175 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/aba5a916dfea,40223,1732020606175/aba5a916dfea%2C40223%2C1732020606175.meta.1732020607159.meta 2024-11-19T12:50:07,176 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40965:40965),(127.0.0.1/127.0.0.1:40609:40609)] 2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-19T12:50:07,187 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
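The last records above show the MultiRowMutationEndpoint coprocessor being loaded for hbase:meta from its table descriptor, where it appears as the spec string '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|' (class name plus priority). As a hedged sketch, the snippet below shows how a coprocessor class is attached to a table descriptor through the public client API; the table name is hypothetical, and hbase:meta gets this endpoint from its built-in descriptor rather than from user code like this.

```java
// Sketch of attaching a coprocessor such as the MultiRowMutationEndpoint named in the
// "Loading coprocessor class ... priority 536870911" record to a table descriptor via the
// public client API. Table name is hypothetical; shown only to decode the spec string.
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class CoprocessorSketch {
    public static TableDescriptor withEndpoint() throws java.io.IOException {
        return TableDescriptorBuilder
            .newBuilder(TableName.valueOf("example"))
            // Registers the endpoint by class name; the 536870911 seen in the log is the
            // priority field of the encoded '|class|priority|' spec string.
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();
    }
}
```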
2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-19T12:50:07,187 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-19T12:50:07,188 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-19T12:50:07,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-19T12:50:07,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-19T12:50:07,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:07,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:07,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-19T12:50:07,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-19T12:50:07,190 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:07,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:07,191 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-19T12:50:07,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-19T12:50:07,192 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:07,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-19T12:50:07,192 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-19T12:50:07,193 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-19T12:50:07,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-19T12:50:07,193 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-19T12:50:07,193 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-19T12:50:07,194 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740 2024-11-19T12:50:07,195 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740 2024-11-19T12:50:07,196 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-19T12:50:07,196 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-19T12:50:07,197 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-19T12:50:07,198 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-19T12:50:07,199 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=864073, jitterRate=0.09872615337371826}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-19T12:50:07,199 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-19T12:50:07,200 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732020607188Writing region info on filesystem at 1732020607188Initializing all the Stores at 1732020607188Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020607188Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020607189 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732020607189Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732020607189Cleaning up temporary data from old regions at 1732020607196 (+7 ms)Running coprocessor post-open hooks at 1732020607199 (+3 ms)Region opened successfully at 1732020607199 2024-11-19T12:50:07,201 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732020607150 2024-11-19T12:50:07,203 DEBUG [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-19T12:50:07,203 INFO [RS_OPEN_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-19T12:50:07,204 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=aba5a916dfea,40223,1732020606175 2024-11-19T12:50:07,205 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as aba5a916dfea,40223,1732020606175, state=OPEN 2024-11-19T12:50:07,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:50:07,280 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-19T12:50:07,280 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=aba5a916dfea,40223,1732020606175 2024-11-19T12:50:07,280 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:50:07,281 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-19T12:50:07,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-19T12:50:07,283 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=aba5a916dfea,40223,1732020606175 in 283 msec 2024-11-19T12:50:07,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-19T12:50:07,286 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 773 msec 2024-11-19T12:50:07,287 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-19T12:50:07,287 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-19T12:50:07,288 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:50:07,288 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,40223,1732020606175, seqNum=-1] 2024-11-19T12:50:07,288 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:50:07,290 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:46497, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:50:07,295 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 819 msec 2024-11-19T12:50:07,295 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732020607295, completionTime=-1 2024-11-19T12:50:07,296 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-19T12:50:07,296 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-19T12:50:07,298 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-19T12:50:07,298 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732020667298 2024-11-19T12:50:07,298 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732020727298 2024-11-19T12:50:07,298 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-19T12:50:07,298 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,299 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,42905,1732020417310/aba5a916dfea%2C42905%2C1732020417310.1732020417545 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:07,299 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:42615/user/jenkins/test-data/be1b7fe0-3385-db57-7bb0-1a3cb16493e0/WALs/aba5a916dfea,43765,1732020415622/aba5a916dfea%2C43765%2C1732020415622.meta.1732020417071.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-19T12:50:07,299 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-aba5a916dfea:34001, period=300000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,299 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,299 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-19T12:50:07,301 DEBUG [master/aba5a916dfea:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-19T12:50:07,303 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.073sec 2024-11-19T12:50:07,303 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-19T12:50:07,303 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-19T12:50:07,303 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-19T12:50:07,304 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-19T12:50:07,304 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-19T12:50:07,304 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-19T12:50:07,304 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-19T12:50:07,306 DEBUG [master/aba5a916dfea:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-19T12:50:07,306 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-19T12:50:07,306 INFO [master/aba5a916dfea:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=aba5a916dfea,34001,1732020606026-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-19T12:50:07,394 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@27c31367, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:50:07,394 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request aba5a916dfea,34001,-1 for getting cluster id 2024-11-19T12:50:07,394 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-19T12:50:07,395 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'def24025-56d8-4574-85af-4e1688fd1510' 2024-11-19T12:50:07,396 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-19T12:50:07,396 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "def24025-56d8-4574-85af-4e1688fd1510" 2024-11-19T12:50:07,396 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d106200, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:50:07,396 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [aba5a916dfea,34001,-1] 2024-11-19T12:50:07,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-19T12:50:07,397 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,398 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47120, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-19T12:50:07,398 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36aad325, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-19T12:50:07,399 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-19T12:50:07,400 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=aba5a916dfea,40223,1732020606175, seqNum=-1] 2024-11-19T12:50:07,400 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-19T12:50:07,401 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41730, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-19T12:50:07,403 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=aba5a916dfea,34001,1732020606026 2024-11-19T12:50:07,403 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-19T12:50:07,405 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-19T12:50:07,405 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-19T12:50:07,407 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1, archiveDir=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs, maxLogs=32 2024-11-19T12:50:07,407 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732020607407 2024-11-19T12:50:07,412 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732020607407 2024-11-19T12:50:07,412 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40965:40965),(127.0.0.1/127.0.0.1:40609:40609)] 2024-11-19T12:50:07,413 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732020607413 2024-11-19T12:50:07,419 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,419 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,419 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,419 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,419 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,419 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732020607407 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732020607413 2024-11-19T12:50:07,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:40609:40609),(127.0.0.1/127.0.0.1:40965:40965)] 2024-11-19T12:50:07,420 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732020607407 is not closed yet, will try archiving it next time 2024-11-19T12:50:07,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741835_1011 (size=93) 2024-11-19T12:50:07,421 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,421 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,421 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,421 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741835_1011 (size=93) 2024-11-19T12:50:07,421 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,422 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving 
hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/WALs/test.com,8080,1/test.com%2C8080%2C1.1732020607407 to hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs/test.com%2C8080%2C1.1732020607407 2024-11-19T12:50:07,423 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741836_1012 (size=93) 2024-11-19T12:50:07,424 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741836_1012 (size=93) 2024-11-19T12:50:07,426 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs 2024-11-19T12:50:07,426 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732020607413) 2024-11-19T12:50:07,426 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-19T12:50:07,426 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-19T12:50:07,426 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at 
org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:50:07,426 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,427 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,427 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-19T12:50:07,427 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-19T12:50:07,427 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=546352225, stopped=false 2024-11-19T12:50:07,427 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=aba5a916dfea,34001,1732020606026 2024-11-19T12:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-19T12:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:07,446 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:07,446 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:50:07,447 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-19T12:50:07,447 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:50:07,447 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,447 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:07,447 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server 'aba5a916dfea,40223,1732020606175' ***** 2024-11-19T12:50:07,447 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-19T12:50:07,447 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-19T12:50:07,447 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-19T12:50:07,447 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(959): stopping server aba5a916dfea,40223,1732020606175 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;aba5a916dfea:40223. 2024-11-19T12:50:07,448 DEBUG [RS:0;aba5a916dfea:40223 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-19T12:50:07,448 DEBUG [RS:0;aba5a916dfea:40223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-19T12:50:07,448 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-19T12:50:07,448 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-19T12:50:07,448 DEBUG [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-19T12:50:07,449 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-19T12:50:07,449 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-19T12:50:07,449 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-19T12:50:07,449 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-19T12:50:07,449 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-19T12:50:07,449 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-19T12:50:07,468 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/.tmp/ns/0a1f9568db3f4839b192470c4621a8b8 is 43, key is default/ns:d/1732020607290/Put/seqid=0 2024-11-19T12:50:07,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741837_1013 (size=5153) 2024-11-19T12:50:07,472 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741837_1013 (size=5153) 2024-11-19T12:50:07,473 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/.tmp/ns/0a1f9568db3f4839b192470c4621a8b8 2024-11-19T12:50:07,478 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/.tmp/ns/0a1f9568db3f4839b192470c4621a8b8 as hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/ns/0a1f9568db3f4839b192470c4621a8b8 2024-11-19T12:50:07,483 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/ns/0a1f9568db3f4839b192470c4621a8b8, entries=2, sequenceid=6, filesize=5.0 K 2024-11-19T12:50:07,484 INFO 
[RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 35ms, sequenceid=6, compaction requested=false 2024-11-19T12:50:07,484 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-19T12:50:07,489 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-19T12:50:07,490 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-19T12:50:07,490 INFO [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-19T12:50:07,490 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732020607449Running coprocessor pre-close hooks at 1732020607449Disabling compacts and flushes for region at 1732020607449Disabling writes for close at 1732020607449Obtaining lock to block concurrent updates at 1732020607449Preparing flush snapshotting stores in 1588230740 at 1732020607449Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732020607449Flushing stores of hbase:meta,,1.1588230740 at 1732020607450 (+1 ms)Flushing 1588230740/ns: creating writer at 1732020607450Flushing 1588230740/ns: appending metadata at 1732020607467 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732020607467Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ce32bb2: reopening flushed file at 1732020607477 (+10 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 35ms, sequenceid=6, compaction requested=false at 1732020607484 (+7 ms)Writing region close event to WAL at 1732020607486 (+2 ms)Running coprocessor post-close hooks at 1732020607490 (+4 ms)Closed at 1732020607490 2024-11-19T12:50:07,490 DEBUG [RS_CLOSE_META-regionserver/aba5a916dfea:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-19T12:50:07,547 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-19T12:50:07,547 INFO [regionserver/aba5a916dfea:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-19T12:50:07,649 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(976): stopping server aba5a916dfea,40223,1732020606175; all regions closed. 
2024-11-19T12:50:07,649 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,649 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,649 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,649 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,649 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741834_1010 (size=1152) 2024-11-19T12:50:07,651 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741834_1010 (size=1152) 2024-11-19T12:50:07,653 DEBUG [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs 2024-11-19T12:50:07,653 INFO [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C40223%2C1732020606175.meta:.meta(num 1732020607159) 2024-11-19T12:50:07,653 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,653 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,653 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,654 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,654 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741833_1009 (size=93) 2024-11-19T12:50:07,655 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741833_1009 (size=93) 2024-11-19T12:50:07,657 DEBUG [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/oldWALs 2024-11-19T12:50:07,657 INFO [RS:0;aba5a916dfea:40223 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog aba5a916dfea%2C40223%2C1732020606175:(num 1732020606685) 2024-11-19T12:50:07,657 DEBUG [RS:0;aba5a916dfea:40223 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-19T12:50:07,657 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.LeaseManager(133): Closed leases 2024-11-19T12:50:07,657 INFO [RS:0;aba5a916dfea:40223 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:50:07,658 INFO [RS:0;aba5a916dfea:40223 {}] hbase.ChoreService(370): Chore service for: regionserver/aba5a916dfea:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-19T12:50:07,658 INFO [RS:0;aba5a916dfea:40223 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:50:07,658 INFO [regionserver/aba5a916dfea:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:50:07,658 INFO [RS:0;aba5a916dfea:40223 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:40223 2024-11-19T12:50:07,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/aba5a916dfea,40223,1732020606175 2024-11-19T12:50:07,686 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-19T12:50:07,686 INFO [RS:0;aba5a916dfea:40223 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:50:07,696 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [aba5a916dfea,40223,1732020606175] 2024-11-19T12:50:07,704 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/aba5a916dfea,40223,1732020606175 already deleted, retry=false 2024-11-19T12:50:07,704 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; aba5a916dfea,40223,1732020606175 expired; onlineServers=0 2024-11-19T12:50:07,704 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master 'aba5a916dfea,34001,1732020606026' ***** 2024-11-19T12:50:07,704 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-19T12:50:07,704 INFO [M:0;aba5a916dfea:34001 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-19T12:50:07,705 INFO [M:0;aba5a916dfea:34001 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-19T12:50:07,705 DEBUG [M:0;aba5a916dfea:34001 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-19T12:50:07,705 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-19T12:50:07,705 DEBUG [M:0;aba5a916dfea:34001 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-19T12:50:07,705 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020606481 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.small.0-1732020606481,5,FailOnTimeoutGroup] 2024-11-19T12:50:07,705 DEBUG [master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020606481 {}] cleaner.HFileCleaner(306): Exit Thread[master/aba5a916dfea:0:becomeActiveMaster-HFileCleaner.large.0-1732020606481,5,FailOnTimeoutGroup] 2024-11-19T12:50:07,705 INFO [M:0;aba5a916dfea:34001 {}] hbase.ChoreService(370): Chore service for: master/aba5a916dfea:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-19T12:50:07,705 INFO [M:0;aba5a916dfea:34001 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-19T12:50:07,705 DEBUG [M:0;aba5a916dfea:34001 {}] master.HMaster(1795): Stopping service threads 2024-11-19T12:50:07,705 INFO [M:0;aba5a916dfea:34001 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-19T12:50:07,705 INFO [M:0;aba5a916dfea:34001 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-19T12:50:07,706 INFO [M:0;aba5a916dfea:34001 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-19T12:50:07,706 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-19T12:50:07,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-19T12:50:07,771 DEBUG [M:0;aba5a916dfea:34001 {}] zookeeper.ZKUtil(347): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-19T12:50:07,771 WARN [M:0;aba5a916dfea:34001 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-19T12:50:07,771 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-19T12:50:07,772 INFO [M:0;aba5a916dfea:34001 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/.lastflushedseqids 2024-11-19T12:50:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741838_1014 (size=99) 2024-11-19T12:50:07,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741838_1014 (size=99) 2024-11-19T12:50:07,777 INFO [M:0;aba5a916dfea:34001 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-19T12:50:07,777 INFO [M:0;aba5a916dfea:34001 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-19T12:50:07,777 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-19T12:50:07,777 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:07,777 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:07,777 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-19T12:50:07,777 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:07,777 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-19T12:50:07,793 DEBUG [M:0;aba5a916dfea:34001 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b004a85e73b493897f701c70f459b51 is 82, key is hbase:meta,,1/info:regioninfo/1732020607204/Put/seqid=0 2024-11-19T12:50:07,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:07,796 INFO [RS:0;aba5a916dfea:40223 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:50:07,796 INFO [RS:0;aba5a916dfea:40223 {}] regionserver.HRegionServer(1031): Exiting; stopping=aba5a916dfea,40223,1732020606175; zookeeper connection closed. 
2024-11-19T12:50:07,796 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:40223-0x101546ea6ff0001, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:07,796 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@56e325ab {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@56e325ab 2024-11-19T12:50:07,797 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-19T12:50:07,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741839_1015 (size=5672) 2024-11-19T12:50:07,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741839_1015 (size=5672) 2024-11-19T12:50:07,798 INFO [M:0;aba5a916dfea:34001 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b004a85e73b493897f701c70f459b51 2024-11-19T12:50:07,816 DEBUG [M:0;aba5a916dfea:34001 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/517e756660bb4ef9b50296405fbfa966 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732020607294/Put/seqid=0 2024-11-19T12:50:07,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741840_1016 (size=5275) 2024-11-19T12:50:07,820 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741840_1016 (size=5275) 2024-11-19T12:50:07,820 INFO [M:0;aba5a916dfea:34001 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/517e756660bb4ef9b50296405fbfa966 2024-11-19T12:50:07,838 DEBUG [M:0;aba5a916dfea:34001 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51c0789582e14c9ca51c2772f479838f is 69, key is aba5a916dfea,40223,1732020606175/rs:state/1732020606528/Put/seqid=0 2024-11-19T12:50:07,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741841_1017 (size=5156) 2024-11-19T12:50:07,842 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741841_1017 (size=5156) 2024-11-19T12:50:07,843 INFO [M:0;aba5a916dfea:34001 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51c0789582e14c9ca51c2772f479838f 2024-11-19T12:50:07,861 DEBUG [M:0;aba5a916dfea:34001 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/729db0b8505447bb8306e6a87c5dd613 is 52, key is load_balancer_on/state:d/1732020607404/Put/seqid=0 2024-11-19T12:50:07,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741842_1018 (size=5056) 2024-11-19T12:50:07,865 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741842_1018 (size=5056) 2024-11-19T12:50:07,865 INFO [M:0;aba5a916dfea:34001 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/729db0b8505447bb8306e6a87c5dd613 2024-11-19T12:50:07,869 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/5b004a85e73b493897f701c70f459b51 as hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b004a85e73b493897f701c70f459b51 2024-11-19T12:50:07,874 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/5b004a85e73b493897f701c70f459b51, entries=8, sequenceid=29, filesize=5.5 K 2024-11-19T12:50:07,875 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/517e756660bb4ef9b50296405fbfa966 as hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/517e756660bb4ef9b50296405fbfa966 2024-11-19T12:50:07,879 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/517e756660bb4ef9b50296405fbfa966, entries=3, sequenceid=29, filesize=5.2 K 2024-11-19T12:50:07,880 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/51c0789582e14c9ca51c2772f479838f as hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/51c0789582e14c9ca51c2772f479838f 2024-11-19T12:50:07,885 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/51c0789582e14c9ca51c2772f479838f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-19T12:50:07,885 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/729db0b8505447bb8306e6a87c5dd613 as hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/729db0b8505447bb8306e6a87c5dd613 2024-11-19T12:50:07,890 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:45633/user/jenkins/test-data/f1ee7832-04d5-7c5d-fbf1-801ea68c0ce3/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/729db0b8505447bb8306e6a87c5dd613, entries=1, sequenceid=29, filesize=4.9 K 2024-11-19T12:50:07,891 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false 2024-11-19T12:50:07,893 INFO [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-19T12:50:07,893 DEBUG [M:0;aba5a916dfea:34001 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732020607777Disabling compacts and flushes for region at 1732020607777Disabling writes for close at 1732020607777Obtaining lock to block concurrent updates at 1732020607777Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732020607777Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732020607778 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732020607778Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732020607778Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732020607793 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732020607793Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732020607802 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732020607815 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732020607815Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732020607825 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732020607837 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732020607837Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732020607847 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732020607860 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732020607861 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7014dfc8: reopening flushed file at 1732020607869 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6abed2e2: reopening flushed file at 1732020607874 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2ce0ef81: reopening flushed file at 1732020607879 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@17e99053: reopening flushed file at 1732020607885 (+6 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 114ms, sequenceid=29, compaction requested=false at 1732020607891 (+6 ms)Writing region close event to WAL at 1732020607893 (+2 ms)Closed at 1732020607893 2024-11-19T12:50:07,893 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,894 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,894 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,894 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,894 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-19T12:50:07,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41851 is added to blk_1073741830_1006 (size=10311) 2024-11-19T12:50:07,896 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45379 is added to blk_1073741830_1006 (size=10311) 2024-11-19T12:50:07,896 INFO [M:0;aba5a916dfea:34001 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-19T12:50:07,896 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-19T12:50:07,896 INFO [M:0;aba5a916dfea:34001 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34001 2024-11-19T12:50:07,897 INFO [M:0;aba5a916dfea:34001 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-19T12:50:08,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:08,011 INFO [M:0;aba5a916dfea:34001 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-19T12:50:08,011 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:34001-0x101546ea6ff0000, quorum=127.0.0.1:54187, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-19T12:50:08,014 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2def2572{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:08,015 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@48189acf{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:08,015 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:08,015 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2c01fadd{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:08,016 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5fb67b12{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:08,017 WARN [BP-477597641-172.17.0.2-1732020604354 heartbeating to localhost/127.0.0.1:45633 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:50:08,017 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:50:08,017 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:50:08,017 WARN [BP-477597641-172.17.0.2-1732020604354 heartbeating to localhost/127.0.0.1:45633 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-477597641-172.17.0.2-1732020604354 (Datanode Uuid 16ad879d-1c3e-4197-a156-cebd023a40d9) service to localhost/127.0.0.1:45633 2024-11-19T12:50:08,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data3/current/BP-477597641-172.17.0.2-1732020604354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:08,018 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data4/current/BP-477597641-172.17.0.2-1732020604354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:08,019 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:50:08,021 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d4cb733{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-19T12:50:08,022 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@45f7f9ff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:08,022 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:08,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3bcfd91f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:08,022 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3f0aa271{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:08,023 WARN [BP-477597641-172.17.0.2-1732020604354 heartbeating to localhost/127.0.0.1:45633 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-19T12:50:08,023 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-19T12:50:08,023 WARN [BP-477597641-172.17.0.2-1732020604354 heartbeating to localhost/127.0.0.1:45633 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-477597641-172.17.0.2-1732020604354 (Datanode Uuid eb0f41f7-c3e6-4345-b63a-3ba0a4c37eb7) service to localhost/127.0.0.1:45633 2024-11-19T12:50:08,023 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-19T12:50:08,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data1/current/BP-477597641-172.17.0.2-1732020604354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:08,024 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/cluster_5d0a07f5-e7bc-647d-2cee-49d676ee2593/data/data2/current/BP-477597641-172.17.0.2-1732020604354 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-19T12:50:08,024 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-19T12:50:08,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@66d38df4{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-19T12:50:08,030 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c6d62fb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-19T12:50:08,030 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-19T12:50:08,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3d03122a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-19T12:50:08,030 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@224b825{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/82fa255a-ed6d-4653-e01b-c719671e9435/hadoop.log.dir/,STOPPED} 2024-11-19T12:50:08,036 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-19T12:50:08,050 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-19T12:50:08,059 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=269 (was 230) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45633 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45633 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45633 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45633 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:45633 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:45633 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (1805219993) connection to localhost/127.0.0.1:45633 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: LeaseRenewer:jenkins@localhost:45633 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=532 (was 515) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=250 (was 254), ProcessCount=11 (was 11), AvailableMemoryMB=5374 (was 5380)